一:保存模型
"""Train a single-layer softmax classifier on MNIST and save it with tf.train.Saver."""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels (downloads on first run).
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# 100 images per mini-batch.
batch_size = 100
# Number of whole batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28x28 images and 10-class one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Simple network: 784 input neurons -> 10 output neurons, zero-initialized.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# BUG FIX: keep the raw (pre-softmax) logits separate.
# softmax_cross_entropy_with_logits_v2 applies softmax internally, so it must
# receive UNSCALED logits; the original passed the already-softmaxed
# `prediction`, applying softmax twice and weakening the gradients.
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Cross-entropy loss on the raw logits.
# loss = tf.reduce_mean(tf.square(y - prediction))  # quadratic cost (unused)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
# Plain gradient descent, learning rate 0.2.
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer op.
init = tf.global_variables_initializer()

# Boolean vector: per-example match between predicted and true class.
# argmax returns the index of the largest value along axis 1.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy = mean of the boolean vector cast to float.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        # Evaluate on the full test set after each epoch.
        acc = sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
    # Persist the trained variables to a checkpoint.
    saver.save(sess, 'net/my_net.ckpt')
結果:
Iter 0,Testing Accuracy 0.8252 Iter 1,Testing Accuracy 0.8916 Iter 2,Testing Accuracy 0.9008 Iter 3,Testing Accuracy 0.906 Iter 4,Testing Accuracy 0.9091 Iter 5,Testing Accuracy 0.9104 Iter 6,Testing Accuracy 0.911 Iter 7,Testing Accuracy 0.9127 Iter 8,Testing Accuracy 0.9145 Iter 9,Testing Accuracy 0.9166 Iter 10,Testing Accuracy 0.9177
二:載入模型
"""Rebuild the same MNIST graph, then restore the saved checkpoint and compare accuracy."""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# 100 images per mini-batch.
batch_size = 100
# Number of whole batches per epoch (unused here, kept to mirror the training script).
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28x28 images and 10-class one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# The graph must match the one that was saved: 784 inputs -> 10 outputs.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# BUG FIX: as in the training script, the loss must be built from the raw
# logits — softmax_cross_entropy_with_logits_v2 applies softmax internally,
# so feeding it the softmaxed `prediction` applies softmax twice.
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Cross-entropy loss on the raw logits (defined for parity; not trained here).
# loss = tf.reduce_mean(tf.square(y - prediction))  # quadratic cost (unused)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer op.
init = tf.global_variables_initializer()

# Boolean vector: per-example match between predicted and true class.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy = mean of the boolean vector cast to float.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # Accuracy with freshly initialized (all-zero, untrained) weights.
    print('未載入識別率', sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    # Restore the trained variables saved by the training script.
    saver.restore(sess, 'net/my_net.ckpt')
    # Accuracy after restoring the trained weights.
    print('載入後識別率', sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
結果:
未載入識別率 0.098 INFO:tensorflow:Restoring parameters from net/my_net.ckpt 載入後識別率 0.9177