I'm trying to build an autoencoder in Python using Chainer. I wrote and ran the following code:
import numpy as np
import chainer.links as L
from chainer import Chain, dataset, iterators, optimizers, training

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def __call__(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

class Dataset(dataset.DatasetMixin):
    def __init__(self, number_of_data, show_initial=False):
        noise_level = 1
        self.data = np.zeros((number_of_data, 3), dtype=np.float32)
        OA_vector = np.array([3, 2, 1])
        OB_vector = np.array([2, -1, 1])
        t = np.random.uniform(-0.5, 0.5, number_of_data)
        s = np.random.uniform(-0.5, 0.5, number_of_data)
        for i in range(0, number_of_data):
            noise = np.random.uniform(-noise_level, noise_level, 3)
            self.data[i] = t[i]*OA_vector + s[i]*OB_vector + noise

    def __len__(self):
        return self.data.shape[0]

    def get_example(self, idx):
        return self.data[idx]

if __name__ == "__main__":
    n_epoch = 5
    batch_size = 100
    number_of_data = 1000  # number of data points
    train_data = Dataset(number_of_data, False)
    model = Autoencoder()
    optimizer = optimizers.SGD(lr=0.05)
    optimizer.setup(model)
    train_iter = iterators.SerialIterator(train_data, batch_size)
    updater = training.StandardUpdater(train_iter, optimizer, device=0)
    trainer = training.Trainer(updater, (n_epoch, "epoch"), out="result")
    trainer.run()
Dataset(number_of_data, False) generates number_of_data three-dimensional vectors. If the second argument is changed from False to True, the generated data can be visualized.
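For example, the dataset can be checked on its own (a minimal illustration using only the class above):

train_data = Dataset(5)
print(len(train_data))            # -> 5
print(train_data.get_example(0))  # one float32 vector of shape (3,)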
The following error occurs when I run it. What is the cause?
Exception in main training loop: Unsupported type <class 'NoneType'>
Traceback (most recent call last):
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
    update()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
    optimizer.update(loss_func, in_arrays)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
    loss.backward(loss_scale=self._loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
    self._backward_main(retain_grad, loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
    target_input_indexes, out_grad, in_grad)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
    gxs = self.backward(target_input_indexes, grad_outputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
    return ReLUGrad2(y).apply((gy,))
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
    outputs = self.forward(in_data)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
    return self.forward_cpu(inputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
    y = (self.b > 0) * inputs[0]
  File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
  File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
  File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
Will finalize trainer extensions and updater before re-raising the exception.
Traceback (most recent call last):
  File "AC.py", line 71, in <module>
    trainer.run()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
    six.reraise(*sys.exc_info())
  File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
    raise value
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
    update()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
    optimizer.update(loss_func, in_arrays)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
    loss.backward(loss_scale=self._loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
    self._backward_main(retain_grad, loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
    target_input_indexes, out_grad, in_grad)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
    gxs = self.backward(target_input_indexes, grad_outputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
    return ReLUGrad2(y).apply((gy,))
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
    outputs = self.forward(in_data)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
    return self.forward_cpu(inputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
    y = (self.b > 0) * inputs[0]
  File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
  File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
  File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
TypeError: Unsupported type <class 'NoneType'>
Incidentally, if I comment out trainer.run(), no error occurs (though of course no training happens either…).
Since the traceback goes through cupy, I suspected it was GPU-related, so I changed the updater to

updater = training.StandardUpdater(train_iter, optimizer, device=-1)

When I ran it with that, the following appeared:
Exception in main training loop: unsupported operand type(s) for *: 'bool' and 'NoneType'
Traceback (most recent call last):
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
    update()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
    optimizer.update(loss_func, in_arrays)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
    loss.backward(loss_scale=self._loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
    self._backward_main(retain_grad, loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
    target_input_indexes, out_grad, in_grad)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
    gxs = self.backward(target_input_indexes, grad_outputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
    return ReLUGrad2(y).apply((gy,))
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
    outputs = self.forward(in_data)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
    return self.forward_cpu(inputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
    y = (self.b > 0) * inputs[0]
Will finalize trainer extensions and updater before re-raising the exception.
Traceback (most recent call last):
  File "AC.py", line 70, in <module>
    trainer.run()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
    six.reraise(*sys.exc_info())
  File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
    raise value
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
    update()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
    optimizer.update(loss_func, in_arrays)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
    loss.backward(loss_scale=self._loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
    self._backward_main(retain_grad, loss_scale)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
    target_input_indexes, out_grad, in_grad)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
    gxs = self.backward(target_input_indexes, grad_outputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
    return ReLUGrad2(y).apply((gy,))
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
    outputs = self.forward(in_data)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
    return self.forward_cpu(inputs)
  File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
    y = (self.b > 0) * inputs[0]
TypeError: unsupported operand type(s) for *: 'bool' and 'NoneType'
So essentially the same error occurs on the CPU as well.
Tags: python, machine-learning, deep-learning, chainer
The error is caused by the missing target (correct-answer) data and loss function: as the traceback shows (optimizer.update(loss_func, in_arrays)), the Trainer calls the model itself as the loss function, so it must return a scalar loss, not the reconstruction.
If you write it without using Trainer, learning comes down to the following loop:

for batch in train_iter:
    con = chainer.dataset.concat_examples(batch)  # pack dataset[0], dataset[1], ... into one array
    con = chainer.Variable(con)                   # wrap the array in a chainer.Variable
    pred = model(con)                             # prediction
    loss = F.mean_absolute_error(pred, con)       # compare the prediction with the target (the input itself, for an autoencoder)
    model.cleargrads()                            # clear the previously computed gradients
    loss.backward()                               # compute the gradients of the loss
    optimizer.update()                            # update the model parameters from the gradients
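Putting that together, a minimal stand-alone version might look like this (a sketch, not the exact code from the question: it assumes the Autoencoder and Dataset classes above, with __call__ returning h2, and stops after n_epoch epochs via the iterator's epoch counter):

import chainer
import chainer.functions as F
from chainer import iterators, optimizers

model = Autoencoder()                 # the (fixed) model from the question
optimizer = optimizers.SGD(lr=0.05)
optimizer.setup(model)
train_iter = iterators.SerialIterator(Dataset(1000), 100)

n_epoch = 5
while train_iter.epoch < n_epoch:
    batch = train_iter.next()
    con = chainer.dataset.concat_examples(batch)  # one (100, 3) float32 array
    pred = model(con)                             # reconstruction of the batch
    loss = F.mean_absolute_error(pred, con)       # the input itself is the target
    model.cleargrads()
    loss.backward()
    optimizer.update()
    if train_iter.is_new_epoch:
        print('epoch {}: loss = {:.4f}'.format(train_iter.epoch, float(loss.array)))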
If you want to use the Trainer, follow the pattern of chainer.links.Classifier (https://docs.chainer.org/en/stable/reference/generated/chainer.links.Classifier.html) and create a new model that couples the predictor with a loss function.
For example:
from chainer import reporter

class AEWrapper(Chain):
    def __init__(self, predictor, lossfun=F.mean_absolute_error):
        super(AEWrapper, self).__init__()
        self.lossfun = lossfun
        with self.init_scope():
            self.predictor = predictor

    def forward(self, *args, **kwargs):
        self.y = None
        self.loss = None
        self.y = self.predictor(*args)
        self.loss = self.lossfun(self.y, *args)
        reporter.report({'loss': self.loss}, self)
        return self.loss
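To sanity-check the wrapper before handing it to the Trainer, you can feed it a dummy batch (a minimal sketch; note that Link.__call__ dispatches to forward() only in Chainer v5 and later, so on older versions rename forward to __call__):

import numpy as np

wrapped = AEWrapper(Autoencoder())
x = np.random.rand(8, 3).astype(np.float32)  # dummy batch of 8 three-dimensional vectors
loss = wrapped(x)                            # forward() computes and reports the loss
print(loss)                                  # a scalar chainer.Variable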
Incidentally, with the code as it stands, both the training result and the training progress are gone once the run finishes, so it is better to log and print them properly:
import chainer
from chainer.training import extensions
from chainer.training.triggers import IntervalTrigger

n_epoch = 5
batch_size = 100
number_of_data = 1000  # number of data points

train_data = Dataset(number_of_data, False)
model = Autoencoder()
model = AEWrapper(model)
optimizer = chainer.optimizers.SGD(lr=0.05)
optimizer.setup(model)
train_iter = chainer.iterators.SerialIterator(train_data, batch_size)
updater = chainer.training.StandardUpdater(train_iter, optimizer, device=0)
trainer = chainer.training.Trainer(updater, (n_epoch, "epoch"), out="result")
trainer.extend(extensions.LogReport(keys=["main/loss"],
                                    trigger=IntervalTrigger(1, 'epoch')))
trainer.extend(extensions.PrintReport(['epoch', 'main/loss']),
               trigger=IntervalTrigger(1, 'epoch'))
trainer.extend(extensions.snapshot_object(model, 'model_{.updater.iteration}.npz'),
               trigger=(1, 'epoch'))
trainer.run()
Output:

epoch       main/loss
1           1.31194
2           1.0665
3           0.883515
4           0.743101
5           0.635571
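Since snapshot_object saves the wrapped model under result/ once per epoch, it can be restored later with chainer.serializers.load_npz (a sketch; the exact filename depends on the iteration counter — with 1000 samples, batch size 100, and 5 epochs, the final snapshot is iteration 50):

import chainer

model = AEWrapper(Autoencoder())
chainer.serializers.load_npz('result/model_50.npz', model)
# model.predictor is now the trained Autoencoder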