I'm a beginner. I'm studying with a book called "Introduction to Deep Learning with TensorFlow".
If I tweak the textbook code a little, defining the model as a class in TensorFlow, and then use saver.save, it doesn't work for some reason.
The files it saves have a larger model.ckpt.index and model.ckpt.meta than the version written without a class (see below).
The problem is that when I then try to read the data back with
saver.restore(sess, "./tmp/model.ckpt")
the former (class version) doesn't work, while the latter does (although the latter is the code from the book, so it's only natural that it works...).
I'm using Python 3.6.
I don't know why, and I've been stuck on this for about two weeks.
Thank you for your help.
1) Run of the code with the class definition (see below)
Files and sizes written by saver.save:
77 May 29 23:27 checkpoint
38675264 May 29 23:27 model.ckpt.data-00000-of-00001
720 May 29 23:27 model.ckpt.index
55104 May 29 23:27 model.ckpt.meta
2) Run of the code without the class definition (see below)
Files and sizes written by saver.save:
77 May 29 23:35 checkpoint
38675264 May 29 23:35 model.ckpt.data-00000-of-00001
638 May 29 23:35 model.ckpt.index
43869 May 29 23:35 model.ckpt.meta
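For reference, here is one way to list the variables each checkpoint actually stores, to see where the extra .index/.meta size comes from. This is only a minimal sketch, assuming TensorFlow 1.x and the save paths used in this question:

import tensorflow as tf

# Minimal sketch (TF 1.x): print every variable name and shape stored
# in each checkpoint. The paths below are the ones from this question;
# adjust them if you saved somewhere else.
for path in ["/tmp/model.ckpt", "./tmp/model.ckpt"]:
    reader = tf.train.NewCheckpointReader(path)
    for name, shape in sorted(reader.get_variable_to_shape_map().items()):
        print(path, name, shape)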
Both runs perform the optimization; here are the last three lines of output from each.
1)
Step: 1800, Loss: 660.897339, Accuracy: 0.980500
Step: 1900, Loss: 671.345215, Accuracy: 0.978200
Step: 2000, Loss: 653.030029, Accuracy: 0.980000
2)
Step: 1800, Loss: 657.612671, Accuracy: 0.980100
Step: 1900, Loss: 795.096313, Accuracy: 0.976200
Step: 2000, Loss: 720.260742, Accuracy: 0.978500
1) Code with the class definition
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

np.random.seed(20160703)
tf.set_random_seed(20160703)

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

class SingleCNN:
    def __init__(self, num_filters, num_units):
        # Build the model in its own graph, then open a session on it.
        with tf.Graph().as_default():
            self.prepare_model(num_filters, num_units)
            self.prepare_session()

    def prepare_model(self, num_filters, num_units):
        num_units1 = 14*14*num_filters
        num_units2 = num_units

        with tf.name_scope('input'):
            x = tf.placeholder(tf.float32, [None, 784], name='input')
            x_image = tf.reshape(x, [-1, 28, 28, 1])

        with tf.name_scope('convolution'):
            W_conv = tf.Variable(
                tf.truncated_normal([5, 5, 1, num_filters], stddev=0.1),
                name='conv-filter')
            h_conv = tf.nn.conv2d(
                x_image, W_conv, strides=[1, 1, 1, 1], padding='SAME',
                name='filter-output')

        with tf.name_scope('pooling'):
            h_pool = tf.nn.max_pool(h_conv, ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1], padding='SAME',
                                    name='max-pool')
            h_pool_flat = tf.reshape(h_pool, [-1, 14*14*num_filters],
                                     name='pool-output')

        with tf.name_scope('fully-connected'):
            w2 = tf.Variable(tf.truncated_normal([num_units1, num_units2]))
            b2 = tf.Variable(tf.zeros([num_units2]))
            hidden2 = tf.nn.relu(tf.matmul(h_pool_flat, w2) + b2,
                                 name='fc-output')

        with tf.name_scope('softmax'):
            w0 = tf.Variable(tf.zeros([num_units2, 10]))
            b0 = tf.Variable(tf.zeros([10]))
            p = tf.nn.softmax(tf.matmul(hidden2, w0) + b0,
                              name='softmax-output')

        with tf.name_scope('optimizer'):
            t = tf.placeholder(tf.float32, [None, 10], name='labels')
            loss = -tf.reduce_sum(t * tf.log(p), name='loss')
            train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)

        with tf.name_scope('evaluator'):
            correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                              tf.float32), name='accuracy')

        # tf.scalar_summary("loss", loss)
        # tf.scalar_summary("accuracy", accuracy)
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("accuracy", accuracy)
        # tf.histogram_summary("convolution_filters", W_conv)
        tf.summary.histogram("convolution_filters", W_conv)

        self.x, self.t, self.p = x, t, p
        self.train_step = train_step
        self.loss = loss
        self.accuracy = accuracy

    def prepare_session(self):
        sess = tf.InteractiveSession()
        # sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # summary = tf.merge_all_summary()
        summary = tf.summary.merge_all()
        # writer = tf.train.SummaryWriter("/tmp/mnist_df_logs", sess.graph)
        writer = tf.summary.FileWriter("/tmp/mnist_df_logs", sess.graph)
        self.sess = sess
        self.summary = summary
        self.writer = writer
        self.saver = saver

cnn = SingleCNN(16, 1024)

i = 0
for _ in range(2000):
    i += 1
    batch_xs, batch_ts = mnist.train.next_batch(100)
    cnn.sess.run(cnn.train_step, feed_dict={cnn.x: batch_xs, cnn.t: batch_ts})
    if (i % 100 == 0) & (i > 100):
        summary, loss_val, acc_val = cnn.sess.run(
            [cnn.summary, cnn.loss, cnn.accuracy],
            feed_dict={cnn.x: mnist.test.images, cnn.t: mnist.test.labels})
        print('Step: %d, Loss: %f, Accuracy: %f'
              % (i, loss_val, acc_val))
        # cnn.writer.add_summary(summary, i)
        # 'mdc_session', global_step=i)
        cnn.saver.save(cnn.sess, "/tmp/model.ckpt")

# !rm -rf /tmp/mnist_df_logs
# tensorboard --logdir=/tmp/mnist_df_logs
2) Code without class definition
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

np.random.seed(20160703)
tf.set_random_seed(20160703)

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

num_filters = 16

x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])

W_conv = tf.Variable(tf.truncated_normal([5, 5, 1, num_filters],
                                         stddev=0.1))
h_conv = tf.nn.conv2d(x_image, W_conv,
                      strides=[1, 1, 1, 1], padding='SAME')
h_pool = tf.nn.max_pool(h_conv, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
h_pool_flat = tf.reshape(h_pool, [-1, 14*14*num_filters])

num_units1 = 14*14*num_filters
num_units2 = 1024

w2 = tf.Variable(tf.truncated_normal([num_units1, num_units2]))
b2 = tf.Variable(tf.zeros([num_units2]))
hidden2 = tf.nn.relu(tf.matmul(h_pool_flat, w2) + b2)

w0 = tf.Variable(tf.zeros([num_units2, 10]))
b0 = tf.Variable(tf.zeros([10]))
p = tf.nn.softmax(tf.matmul(hidden2, w0) + b0)

t = tf.placeholder(tf.float32, [None, 10])
loss = -tf.reduce_sum(t * tf.log(p))
train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)
correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)
saver = tf.train.Saver()

i = 0
for _ in range(2000):
    i += 1
    batch_xs, batch_ts = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, t: batch_ts})
    if (i % 100 == 0) & (i > 1800):
        loss_val, acc_val = sess.run([loss, accuracy],
                                     feed_dict={x: mnist.test.images, t: mnist.test.labels})
        print('Step: %d, Loss: %f, Accuracy: %f'
              % (i, loss_val, acc_val))
        saver.save(sess, "./tmp/model.ckpt")
Code to read and test
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

num_filters = 16

x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])

W_conv = tf.Variable(tf.truncated_normal([5, 5, 1, num_filters], stddev=0.1))
h_conv = tf.nn.conv2d(x_image, W_conv,
                      strides=[1, 1, 1, 1], padding='SAME')
h_pool = tf.nn.max_pool(h_conv, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
h_pool_flat = tf.reshape(h_pool, [-1, 14*14*num_filters])

num_units1 = 14*14*num_filters
num_units2 = 1024

w2 = tf.Variable(tf.truncated_normal([num_units1, num_units2]))
b2 = tf.Variable(tf.zeros([num_units2]))
hidden2 = tf.nn.relu(tf.matmul(h_pool_flat, w2) + b2)

w0 = tf.Variable(tf.zeros([num_units2, 10]))
b0 = tf.Variable(tf.zeros([10]))
p = tf.nn.softmax(tf.matmul(hidden2, w0) + b0)

t = tf.placeholder(tf.float32, [None, 10])
loss = -tf.reduce_sum(t * tf.log(p))
train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)
correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, "/tmp/model.ckpt")

filter_vals, conv_vals, pool_vals = sess.run(
    [W_conv, h_conv, h_pool], feed_dict={x: mnist.test.images[:9]})

# First figure: learned filters and each filter's convolution outputs.
fig = plt.figure(figsize=(10, num_filters+1))
for i in range(num_filters):
    subplot = fig.add_subplot(num_filters+1, 10, 10*(i+1)+1)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(filter_vals[:, :, 0, i],
                   cmap=plt.cm.gray_r, interpolation='nearest')
for i in range(9):
    subplot = fig.add_subplot(num_filters+1, 10, i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.set_title('%d' % np.argmax(mnist.test.labels[i]))
    subplot.imshow(mnist.test.images[i].reshape(28, 28),
                   vmin=0, vmax=1,
                   cmap=plt.cm.gray_r, interpolation='nearest')
    for f in range(num_filters):
        subplot = fig.add_subplot(num_filters+1, 10, 10*(f+1)+i+2)
        subplot.set_xticks([])
        subplot.set_yticks([])
        subplot.imshow(conv_vals[i, :, :, f],
                       cmap=plt.cm.gray_r, interpolation='nearest')
fig.show()

# Second figure: same layout, but with the pooling-layer outputs.
fig = plt.figure(figsize=(10, num_filters+1))
for i in range(num_filters):
    subplot = fig.add_subplot(num_filters+1, 10, 10*(i+1)+1)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(filter_vals[:, :, 0, i],
                   cmap=plt.cm.gray_r, interpolation='nearest')
for i in range(9):
    subplot = fig.add_subplot(num_filters+1, 10, i+2)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.set_title('%d' % np.argmax(mnist.test.labels[i]))
    subplot.imshow(mnist.test.images[i].reshape(28, 28),
                   vmin=0, vmax=1,
                   cmap=plt.cm.gray_r, interpolation='nearest')
    for f in range(num_filters):
        subplot = fig.add_subplot(num_filters+1, 10, 10*(f+1)+i+2)
        subplot.set_xticks([])
        subplot.set_yticks([])
        subplot.imshow(pool_vals[i, :, :, f],
                       cmap=plt.cm.gray_r, interpolation='nearest')
fig.show()

input("Please enter something")
I don't understand what "it doesn't work" means. Is there an error message? Something else?
For example,
from (file defining the class) import SingleCNN, mnist

cnn = SingleCNN(16, 1024)
cnn.saver.restore(cnn.sess, "/model.ckpt")
print('Accuracy: %f' % (cnn.sess.run(cnn.accuracy, feed_dict={cnn.x: mnist.test.images, cnn.t: mnist.test.labels})))
I was able to confirm that this prints the accuracy of the last model that was saved.
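Note that a Saver restores variables by name. The class builds its variables inside name scopes (for example, the filter would be stored as convolution/conv-filter), so a checkpoint written by the class version generally cannot be loaded into a graph built without those scopes unless you map the names yourself. A minimal, hypothetical sketch of that mapping, assuming TF 1.x; the checkpoint key below is inferred from the class code, so verify the real names first:

import tensorflow as tf

# Hypothetical sketch (TF 1.x): a Saver built from a dict maps checkpoint
# names (keys) to variables in the current graph (values).
# 'convolution/conv-filter' is inferred from the name scopes in the class
# code; list the real names with tf.train.NewCheckpointReader first.
W_conv = tf.Variable(tf.zeros([5, 5, 1, 16]))  # stands in for the no-class graph's variable
saver = tf.train.Saver({'convolution/conv-filter': W_conv})
with tf.Session() as sess:
    saver.restore(sess, "/tmp/model.ckpt")  # path from the question's class run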
I fixed the odd indentation.
(simple_cnn.py)
import tensorflow as tf

class SingleCNN:
    def __init__(self, num_filters, num_units):
        # Build the model in its own graph, then open a session on it.
        with tf.Graph().as_default():
            self.prepare_model(num_filters, num_units)
            self.prepare_session()

    def prepare_model(self, num_filters, num_units):
        num_units1 = 14*14*num_filters
        num_units2 = num_units

        with tf.name_scope('input'):
            x = tf.placeholder(tf.float32, [None, 784], name='input')
            x_image = tf.reshape(x, [-1, 28, 28, 1])

        with tf.name_scope('convolution'):
            W_conv = tf.Variable(
                tf.truncated_normal([5, 5, 1, num_filters], stddev=0.1),
                name='conv-filter')
            h_conv = tf.nn.conv2d(
                x_image, W_conv, strides=[1, 1, 1, 1], padding='SAME',
                name='filter-output')

        with tf.name_scope('pooling'):
            h_pool = tf.nn.max_pool(h_conv, ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1], padding='SAME',
                                    name='max-pool')
            h_pool_flat = tf.reshape(h_pool, [-1, 14*14*num_filters],
                                     name='pool-output')

        with tf.name_scope('fully-connected'):
            w2 = tf.Variable(tf.truncated_normal([num_units1, num_units2]))
            b2 = tf.Variable(tf.zeros([num_units2]))
            hidden2 = tf.nn.relu(tf.matmul(h_pool_flat, w2) + b2,
                                 name='fc-output')

        with tf.name_scope('softmax'):
            w0 = tf.Variable(tf.zeros([num_units2, 10]))
            b0 = tf.Variable(tf.zeros([10]))
            p = tf.nn.softmax(tf.matmul(hidden2, w0) + b0,
                              name='softmax-output')

        with tf.name_scope('optimizer'):
            t = tf.placeholder(tf.float32, [None, 10], name='labels')
            loss = -tf.reduce_sum(t * tf.log(p), name='loss')
            train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)

        with tf.name_scope('evaluator'):
            correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                              tf.float32), name='accuracy')

        # tf.scalar_summary("loss", loss)
        # tf.scalar_summary("accuracy", accuracy)
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("accuracy", accuracy)
        # tf.histogram_summary("convolution_filters", W_conv)
        tf.summary.histogram("convolution_filters", W_conv)

        self.x, self.t, self.p = x, t, p
        self.train_step = train_step
        self.loss = loss
        self.accuracy = accuracy

    def prepare_session(self):
        sess = tf.InteractiveSession()
        # sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # summary = tf.merge_all_summary()
        summary = tf.summary.merge_all()
        # writer = tf.train.SummaryWriter("/tmp/mnist_df_logs", sess.graph)
        writer = tf.summary.FileWriter("./tmp/mnist_df_logs", sess.graph)
        self.sess = sess
        self.summary = summary
        self.writer = writer
        self.saver = saver
(train.py)
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

from simple_cnn import SingleCNN

if __name__ == '__main__':
    mnist = input_data.read_data_sets("./tmp/data/", one_hot=True)

    np.random.seed(20160703)
    tf.set_random_seed(20160703)

    cnn = SingleCNN(16, 1024)

    i = 0
    for _ in range(2000):
        i += 1
        batch_xs, batch_ts = mnist.train.next_batch(100)
        cnn.sess.run(cnn.train_step, feed_dict={cnn.x: batch_xs, cnn.t: batch_ts})
        if i % 100 == 0:
            summary, loss_val, acc_val = cnn.sess.run(
                [cnn.summary, cnn.loss, cnn.accuracy],
                feed_dict={cnn.x: mnist.test.images, cnn.t: mnist.test.labels})
            print('Step: %d, Loss: %f, Accuracy: %f'
                  % (i, loss_val, acc_val))
            # cnn.writer.add_summary(summary, i)
            # 'mdc_session', global_step=i)
            cnn.saver.save(cnn.sess, "./tmp/model.ckpt")

    cnn.saver.save(cnn.sess, "./tmp/model.ckpt")
    print('Accuracy: %f' % (cnn.sess.run(cnn.accuracy, feed_dict={cnn.x: mnist.test.images, cnn.t: mnist.test.labels})))
(load_model.py)
from tensorflow.examples.tutorials.mnist import input_data

from simple_cnn import SingleCNN

if __name__ == '__main__':
    mnist = input_data.read_data_sets("./tmp/data/", one_hot=True)

    cnn = SingleCNN(16, 1024)
    cnn.saver.restore(cnn.sess, "./tmp/model.ckpt")
    print('Accuracy: %f' % (cnn.sess.run(cnn.accuracy, feed_dict={cnn.x: mnist.test.images, cnn.t: mnist.test.labels})))
You can load the last saved model without any problems.
Isn't it just that you didn't save it at the end?
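If you want to be certain which state you are loading, one option (again just a sketch, assuming TF 1.x) is to save numbered checkpoints with global_step and restore whatever tf.train.latest_checkpoint reports:

import tensorflow as tf
from simple_cnn import SingleCNN

# Sketch (TF 1.x): during training you would save numbered checkpoints,
# e.g. cnn.saver.save(cnn.sess, "./tmp/model.ckpt", global_step=i).
# latest_checkpoint then returns the newest path (or None if nothing saved).
latest = tf.train.latest_checkpoint("./tmp")
if latest is not None:
    cnn = SingleCNN(16, 1024)  # rebuild the identical graph first
    cnn.saver.restore(cnn.sess, latest)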
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from simple_cnn import SingleCNN

mnist = input_data.read_data_sets("./tmp/data/", one_hot=True)

cnn = SingleCNN(16, 1024)
cnn.saver.restore(cnn.sess, "./tmp/model.ckpt")
for idx in range(10):
    result = cnn.sess.run(tf.argmax(cnn.p, 1), feed_dict={cnn.x: np.array([mnist.test.images[idx]])})
    print('estimated {}, correct {}'.format(result, np.argmax(mnist.test.labels[idx])))
estimated [7], correct 7
estimated [2], correct 2
estimated [1], correct 1
estimated [0], correct 0
estimated [4], correct 4
estimated [1], correct 1
estimated [4], correct 4
estimated [9], correct 9
estimated [6], correct 5
estimated [9], correct 9
There seems to be no problem.