I implemented a neural network with a logistic regression readout, but the accuracy is too low, so I want to fix it.
The input x has shape (1, 64) and the label y is an array of shape (1, 6). I tried changing hyperparameters such as the batch size and the number of neurons, but the accuracy didn't go up. Please let me know how to improve it.
import torch
import torch.nn as nn
import numpy as np
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.network.monitors import Monitor
from sklearn.model_selection import train_test_split
from bindsnet.encoding import poisson_loader
import matplotlib.pyplot as plt
# Model definition: a logistic regression readout over the output spike counts
class LogisticRegression(nn.Module):
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = x.view(-1, 64)
        return self.linear(x)
input_size = 64
num_classes = 6
time = 64
_BATCH_SIZE = 300
num_epochs = 6

# Spiking network: an input layer followed by four LIF layers
network = Network(dt=1.0)
inpt = Input(64, shape=(1, 64))
middle = LIFNodes(900, thresh=-52 + torch.randn(900))
center = LIFNodes(900, thresh=-52 + torch.randn(900))
final = LIFNodes(900, thresh=-52 + torch.randn(900))
output = LIFNodes(6, thresh=-52 + torch.randn(6))
network.add_layer(inpt, name='A')
network.add_layer(middle, name='B')
network.add_layer(center, name='C')
network.add_layer(final, name='D')
network.add_layer(output, name='E')
network.add_connection(Connection(inpt, middle, w=torch.randn(inpt.n, middle.n)), 'A', 'B')
network.add_connection(Connection(middle, center, w=torch.randn(middle.n, center.n)), 'B', 'C')
network.add_connection(Connection(center, final, w=torch.randn(center.n, final.n)), 'C', 'D')
network.add_connection(Connection(final, output, w=torch.randn(final.n, output.n)), 'D', 'E')
network.add_connection(Connection(output, output, w=torch.randn(output.n, output.n)), 'E', 'E')
# Create a monitor for each layer (record spikes and voltages)
inpt_monitor = Monitor(obj=inpt, state_vars=("s", "v"), time=500)
middle_monitor = Monitor(obj=middle, state_vars=("s", "v"), time=500)
center_monitor = Monitor(obj=center, state_vars=("s", "v"), time=500)
final_monitor = Monitor(obj=final, state_vars=("s", "v"), time=500)
out_monitor = Monitor(obj=output, state_vars=("s", "v"), time=500)

# Connect the monitors to the network
network.add_monitor(monitor=inpt_monitor, name="A")
network.add_monitor(monitor=middle_monitor, name="B")
network.add_monitor(monitor=center_monitor, name="C")
network.add_monitor(monitor=final_monitor, name="D")
network.add_monitor(monitor=out_monitor, name="E")

# Re-add spike-only monitors over the simulation window (overwrites the monitors above)
for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)
# Load the data and split it into train and test sets
npzfile = np.load("C:/Users/name/Desktop/myo-python-1.0.4/myo-armband-nn-master/data/train_set.npz")
x = npzfile['x']
y = npzfile['y']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# Convert to tensors and Poisson-encode the inputs
x_train = torch.from_numpy(x_train).float()
y_train = torch.from_numpy(y_train).float()
x_train = torch.clamp(x_train, min=0, max=100)
loader = zip(poisson_loader(x_train * 0.64, time=64), iter(y_train))
# Run the network on each training sample and record the output spike counts
training_pairs = []
for i, (datum, y_train) in enumerate(loader):
    inputs = {'A': datum.repeat(time, 1), 'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    training_pairs.append([network.monitors['E'].get('s').sum(-1), y_train])
    network.reset_state_variables()
    if (i + 1) % 30 == 0:
        print('Train progress: (%d/900)' % (i + 1))
    if (i + 1) == 900:
        print()
        break
model = LogisticRegression(input_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Train the logistic regression readout on (spike count, label) pairs
for epoch in range(num_epochs):
    for i, (s, y_train) in enumerate(training_pairs):
        optimizer.zero_grad()
        output = model(s.float().softmax(0))
        y_train = y_train.view(-1, 6)
        y_train = torch.argmax(y_train, dim=-1)  # one-hot -> class index
        loss = criterion(output, y_train.long())
        loss.backward()
        optimizer.step()
# Encode the test set and collect (spike count, label) pairs
x_test = torch.from_numpy(x_test).float()
y_test = torch.from_numpy(y_test).long()
x_test = torch.clamp(x_test, min=0, max=100)
loader = zip(poisson_loader(x_test * 0.64, time=64), iter(y_test))
test_pairs = []
for i, (datum, y_test) in enumerate(loader):
    inputs = {'A': datum.repeat(time, 1), 'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    test_pairs.append([network.monitors['E'].get('s').sum(-1), y_test])
    network.reset_state_variables()
    if (i + 1) % 30 == 0:
        print('Test progress: (%d/300)' % (i + 1))
    if (i + 1) == 300:
        print()
        break
# Evaluate the readout on the test pairs
correct, total = 0, 0
for s, y_test in test_pairs:
    output = model(s.float().softmax(0))
    _, predicted = torch.max(output.data, 1)
    total += 1
    y_test = torch.argmax(y_test, dim=-1)
    correct += int(predicted == y_test.long())
accuracy = 100 * correct / total
print('Accuracy of logistic regression on test examples: %.2f%%\n' % accuracy)
Training accuracy:
Accuracy of logistic regression on train examples: 23.740000%
Accuracy of logistic regression on train examples: 23.860000%
Accuracy of logistic regression on train examples: 23.940000%
Accuracy of logistic regression on train examples: 23.9800%
Accuracy of logistic regression on train examples: 24.008000%
Accuracy of logistic regression on train examples: 24.030000%
Accuracy of logistic regression on train examples: 24.045714%
Accuracy of logistic regression on train examples: 24.057500%
Accuracy of logistic regression on train examples: 24.064444%
Accuracy of logistic regression on train examples: 24.070000%
Accuracy of logistic regression on train examples: 24.074545%
Accuracy of logistic regression on train examples: 24.078333%
Accuracy of logistic regression on train examples: 24.081538%
Accuracy of logistic regression on train examples: 24.084286%
...
Accuracy of logistic regression on train examples: 24.080540%
You may have already tried some of these, but here are a few ideas that come to mind (a sketch combining several of them follows below):
·Follow the settings from an existing reference that deals with a similar problem.
·If your dataset is not that large, make nn.Dropout(0.5) a little smaller, somewhere between 0.1 and 0.2.
·The accuracy is still slowly rising, so train for a while longer (more epochs).
·If you haven't done it yet, normalize the input data (e.g., subtract the mean and divide by the standard deviation).
·Change the optimizer.
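As a concrete illustration, here is a minimal sketch of the last three suggestions applied to the readout from the question: a smaller dropout rate, standardized inputs, and Adam in place of SGD. It reuses the names from the post (LogisticRegression, input_size, num_classes); the 0.1 dropout rate, the lr=1e-3 Adam learning rate, the epoch count, and the random placeholder data are illustrative assumptions, not tested settings.

import torch
import torch.nn as nn

# Illustrative settings (assumptions, not from the original post)
input_size, num_classes, num_epochs = 64, 6, 20

class LogisticRegression(nn.Module):
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)
        self.dropout = nn.Dropout(0.1)  # suggestion: smaller dropout

    def forward(self, x):
        # Apply the dropout explicitly so that changing its rate has an effect
        x = self.dropout(x.view(-1, 64))
        return self.linear(x)

# Suggestion: standardize the inputs (placeholder data, same shape as the post's)
x_train = torch.rand(900, 64) * 100
x_train = (x_train - x_train.mean(0)) / (x_train.std(0) + 1e-8)
y_train = torch.randint(0, num_classes, (900,))  # placeholder class indices

model = LogisticRegression(input_size, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # suggestion: different optimizer

for epoch in range(num_epochs):  # suggestion: train longer
    optimizer.zero_grad()
    loss = criterion(model(x_train), y_train)
    loss.backward()
    optimizer.step()

One thing worth noting: in the forward of the original code, self.dropout is defined but never applied, so changing the dropout rate alone would have no effect; the sketch applies it inside forward.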