TensorFlow is well suited to building neural networks for image recognition. Here we use the MNIST handwritten-digit dataset as the data source and try out a classic deep-learning CNN model.
(1) Import the MNIST dataset; images are [batch_size, 784] and labels are [batch_size, 10].
(2) Set up the X and Y placeholders.
(3) Build the network of convolution, pooling, and activation layers (the shape bookkeeping is sketched right after this list).
(4) Train the model on the training data, obtain the loss and accuracy curves, and evaluate the model.
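Before the full listing, a quick sanity check on the tensor shapes: with 'SAME' padding each 5x5 convolution keeps the 28x28 spatial size, and each 2x2 max-pool halves it, so after two conv+pool stages the feature map is 7x7 with 64 channels. That is where the 7*7*64 in the fully connected layer comes from. A minimal sketch of that bookkeeping in plain Python:

h = w = 28                      # MNIST images are 28x28
for _ in range(2):              # two conv + 2x2 max-pool stages
    # 'SAME' convolution keeps the size; 2x2 pooling with stride 2 halves it
    h, w = h // 2, w // 2
channels = 64                   # output channels of the second conv layer
print(h, w, h * w * channels)   # 7 7 3136 -> the 7*7*64 flatten size below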
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os

# Load the MNIST data: images are [batch_size, 784], labels are [batch_size, 10]
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32, shape=[None, 784])
y_holder = tf.placeholder(tf.float32, shape=[None, 10])
images, labels = mnist.train.next_batch(batch_size)
print("image shape:%s, labels shape:%s" % (images.shape, labels.shape))

# Initialize weights with a little noise to break symmetry and avoid zero
# gradients, i.e. neurons whose output is stuck at 0 (dead neurons)
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# First convolutional layer: 32 kernels, each attending to one feature
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# Reshape each 784-dim vector back into a 28x28 image; -1 means "all samples"
x_image = tf.reshape(X_holder, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Fully connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout: keep_prob is the fraction of neurons kept
# (0.5 during training, 1.0 at test time)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Map the 1024-dim vector down to 10 dims, one per class
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# Class probabilities, used by batchPredict below
predict_y = tf.nn.softmax(y_conv)

# Cross-entropy loss
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_holder, logits=y_conv))
# Training step
train = tf.train.AdamOptimizer(1e-4).minimize(loss)
# Test accuracy
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)

iterations = 1000

def trainme():
    steps = np.zeros(iterations)
    LOSS = np.zeros_like(steps)
    for step in range(iterations):
        train_X, train_Y = mnist.train.next_batch(batch_size)
        _, loss_value, accuracy_value = session.run(
            [train, loss, accuracy],
            feed_dict={X_holder: train_X, y_holder: train_Y, keep_prob: 0.5})
        steps[step] = step
        LOSS[step] = loss_value  # record the loss so the plot matches its label
        if step % 25 == 0:
            print('step:%d accuracy:%.4f, loss:%s' % (step, accuracy_value, loss_value))
    # Plot the loss curve
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(steps, LOSS, label='loss')
    ax.set_xlabel('step')
    ax.set_ylabel('loss')
    fig.suptitle('cross-entropy loss')
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels=labels)
    plt.show()

def batchPredict(batch_size):
    test_X, test_Y = mnist.test.next_batch(batch_size)
    # keep_prob must be fed; 1.0 disables dropout at prediction time
    predict_labels = session.run(
        predict_y,
        feed_dict={X_holder: test_X, y_holder: test_Y, keep_prob: 1.0})
    image_number = test_X.shape[0]
    for index in range(image_number):
        actual = np.argmax(test_Y[index])
        predict = np.argmax(predict_labels[index])
        # Print only the misclassified digits and their probability vectors
        if actual != predict:
            print('actual:%d ,predict:%d' % (actual, predict))
            print(predict_labels[index])

trainme()
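The per-batch accuracy printed during training is a noisy estimate. After trainme() finishes, a minimal sketch of a final evaluation on the full MNIST test set (assuming the session and tensors defined above are still alive; feeding keep_prob=1.0 turns dropout off):

# Evaluate once on the entire test set; keep_prob=1.0 disables dropout.
test_accuracy = session.run(accuracy,
                            feed_dict={X_holder: mnist.test.images,
                                       y_holder: mnist.test.labels,
                                       keep_prob: 1.0})
print('final test accuracy: %.4f' % test_accuracy)
# batchPredict(100)  # optionally inspect misclassified digits batch by batch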