The dataset comes from http://yann.lecun.com/exdb/mnist/
I have not yet fully understood the structure, workings, and theory of CNNs, so as a first step I wrote one based on an existing model.
Environment: Keras 2.3.1 + TensorFlow 2.1 + CUDA 10.1
The convolutional network structure is based on AlexNet, modified according to the characteristics of the training samples to obtain a new CNN.
Since the training samples are all black-and-white images, only a single grayscale channel needs to be read, so the input dimensions are \(28\times 28\times 1\).
The network structure is as follows; all activation functions are ReLU and the optimizer is Adam (the per-layer output sizes can be checked with the shape sketch after the list):
1: Convolution layer 1: \(5\times 5\) kernel, stride \((2,2)\), 64 filters, 'same' (zero) padding; output \(14\times14\times64\).
2: Pooling layer 1: \(2\times 2\) average pooling, stride \((2,2)\), 'valid' padding; output \(7\times 7\times64\).
3: Batch normalization 1.
4: Convolution layer 2: \(3\times3\) kernel, stride \((1,1)\), 192 filters, 'same' (zero) padding; output \(7\times 7\times 192\).
5: Pooling layer 2: \(2\times 2\) average pooling, stride \((1,1)\), 'valid' padding (excess border discarded); output \(6\times6\times 192\).
6: Batch normalization 2.
7: Fully connected layers + dropout.
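The output sizes above follow from the standard Keras shape rules: with 'same' padding the spatial size is \(\lceil n/s\rceil\), and with 'valid' padding it is \(\lfloor (n-k)/s\rfloor + 1\). A minimal sketch to check them (the helper `out_len` is illustrative and not part of the project code):

```python
import math

def out_len(n, k, s, padding):
    """Output length of one spatial dimension for a Conv2D/Pooling2D layer."""
    if padding == 'same':
        return math.ceil(n / s)
    return (n - k) // s + 1  # 'valid'

n = 28
n = out_len(n, 5, 2, 'same')   # conv1:  28 -> 14
n = out_len(n, 2, 2, 'valid')  # pool1:  14 -> 7
n = out_len(n, 3, 1, 'same')   # conv2:   7 -> 7
n = out_len(n, 2, 1, 'valid')  # pool2:   7 -> 6
print(n)  # 6
```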
```python
# CNNmod.py
from tensorflow import keras


def myNetwork(img_rows, img_cols):
    inputs = keras.Input(shape=[img_rows, img_cols, 1])
    # Convolution layer 1: 5x5 kernel, stride 2, 64 filters, 'same' padding -> 14x14x64
    conv1 = keras.layers.Conv2D(filters=64, kernel_size=[5, 5], strides=[2, 2],
                                activation=keras.activations.relu,
                                use_bias=True, padding='same')(inputs)
    # Pooling layer 1: 2x2 average pooling, stride 2, 'valid' padding -> 7x7x64
    pooling1 = keras.layers.AveragePooling2D(pool_size=[2, 2], strides=[2, 2],
                                             padding='valid')(conv1)
    # Batch normalization layer 1 (axis=1 as in the original; for channels_last data the channel axis would be -1)
    stand1 = keras.layers.BatchNormalization(axis=1)(pooling1)
    # Convolution layer 2: 3x3 kernel, stride 1, 192 filters, 'same' padding -> 7x7x192
    conv2 = keras.layers.Conv2D(filters=192, kernel_size=[3, 3], strides=[1, 1],
                                activation=keras.activations.relu,
                                use_bias=True, padding='same')(stand1)
    # Pooling layer 2: 2x2 average pooling, stride 1, 'valid' padding -> 6x6x192
    pooling2 = keras.layers.AveragePooling2D(pool_size=[2, 2], strides=[1, 1],
                                             padding='valid')(conv2)
    # Batch normalization layer 2
    stand2 = keras.layers.BatchNormalization(axis=1)(pooling2)
    # Fully connected layers with dropout
    flatten = keras.layers.Flatten()(stand2)
    fc1 = keras.layers.Dense(4096, activation=keras.activations.relu, use_bias=True)(flatten)
    drop1 = keras.layers.Dropout(0.5)(fc1)
    fc2 = keras.layers.Dense(4096, activation=keras.activations.relu, use_bias=True)(drop1)
    drop2 = keras.layers.Dropout(0.5)(fc2)
    fc3 = keras.layers.Dense(10, activation=keras.activations.softmax, use_bias=True)(drop2)
    # Build the model
    return keras.Model(inputs=inputs, outputs=fc3)
```
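A minimal usage sketch to confirm that the built model matches the per-layer sizes listed above (assumes CNNmod.py sits under ./model/ as in the training script below; not part of the original scripts):

```python
# Quick shape check of the model defined in CNNmod.py
import model.CNNmod as mod

model = mod.myNetwork(28, 28)
model.summary()  # expect pooling1 -> (7, 7, 64), pooling2 -> (6, 6, 192), final Dense -> 10
```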
Main training script:
```python
# train.py
from tensorflow import keras
import cv2
import numpy as np
import tensorflow as tf

import model.CNNmod as mod

batch_size = 128
num_classes = 10
epochs = 10
img_rows, img_cols = 28, 28
x_train = []
x_test = []
y_train = []
y_test = []
log_dir = "./logs/"

# Read the training-set index file ("filename;label" per line)
with open(r"./preprocess_train.txt", "r") as f:
    lines = f.readlines()
for i in range(60000):
    name = lines[i].split(";")[0]
    img = cv2.imread(r"./train_set/" + name)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to a grayscale image
    img = img / 255
    x_train.append(img)
    y_train.append(lines[i].split(';')[1])
print("Training set loaded")

# Read the test-set index file
with open(r"./preprocess_test.txt", "r") as f:
    lines = f.readlines()
for i in range(10000):
    name = lines[i].split(";")[0]
    img = cv2.imread(r"./test_set/" + name)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img / 255
    x_test.append(img)
    y_test.append(lines[i].split(';')[1])
print("Test set loaded")

# Convert to arrays, one-hot encode the labels, and add the channel dimension
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)

# Build, compile, and train the model, then save the weights
model = mod.myNetwork(img_rows, img_cols)
model.compile(optimizer=tf.optimizers.Adam(0.001),
              loss=keras.losses.categorical_crossentropy,
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))
model.save_weights(log_dir + 'latest.h5')
```
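A hedged sketch of how the saved weights could later be reloaded and the test-set error rate reproduced (assumes x_test and y_test are prepared exactly as in train.py above; this evaluation snippet is not part of the original scripts):

```python
# Illustrative evaluation: rebuild the model, load the trained weights, report the error rate
import model.CNNmod as mod

model = mod.myNetwork(28, 28)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.load_weights('./logs/latest.h5')
loss, acc = model.evaluate(x_test, y_test, batch_size=128)
print('test error rate: {:.2%}'.format(1 - acc))  # about 0.9% with the settings above
```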
Dataset preprocessing: OpenCV is used to convert the MNIST dataset into .jpg images, with the label encoded in each filename; a .txt index file is also written to make reading easier.
```python
# preprocess.py
import os
import struct

import numpy as np
import cv2


def save_mnist_to_jpg(mnist_image_file, mnist_label_file, save_dir):
    # Decide the filename prefix from the source file
    if 'train' in os.path.basename(mnist_image_file):
        prefix = 'train'
    else:
        prefix = 'test'
    labelIndex = 0
    imageIndex = 0
    # Parse the idx label file header (magic number, item count)
    lbdata = open(mnist_label_file, 'rb').read()
    magic, nums = struct.unpack_from(">II", lbdata, labelIndex)
    labelIndex += struct.calcsize('>II')
    # Parse the idx image file header (magic number, item count, rows, columns)
    imgdata = open(mnist_image_file, "rb").read()
    magic, nums, numRows, numColumns = struct.unpack_from('>IIII', imgdata, imageIndex)
    imageIndex += struct.calcsize('>IIII')
    # Write each 28x28 image out as "<prefix>_<index>_<label>.jpg"
    for i in range(nums):
        label = struct.unpack_from('>B', lbdata, labelIndex)[0]
        labelIndex += struct.calcsize('>B')
        im = struct.unpack_from('>784B', imgdata, imageIndex)
        imageIndex += struct.calcsize('>784B')
        im = np.array(im, dtype='uint8')
        img = im.reshape(28, 28)
        save_name = os.path.join(save_dir, '{}_{}_{}.jpg'.format(prefix, i, label))
        cv2.imwrite(save_name, img)


if __name__ == '__main__':
    train_images = './dataset/train-images.idx3-ubyte'  # training-set image file
    train_labels = './dataset/train-labels.idx1-ubyte'  # training-set label file
    test_images = './dataset/t10k-images.idx3-ubyte'    # test-set image file
    test_labels = './dataset/t10k-labels.idx1-ubyte'    # test-set label file
    save_train_dir = './train_set'
    save_test_dir = './test_set'
    if not os.path.exists(save_train_dir):
        os.makedirs(save_train_dir)
    if not os.path.exists(save_test_dir):
        os.makedirs(save_test_dir)
    save_mnist_to_jpg(test_images, test_labels, save_test_dir)
    save_mnist_to_jpg(train_images, train_labels, save_train_dir)

    # Write "filename;label" index files for the training and test sets
    photos = os.listdir("./train_set")
    with open("./preprocess_train.txt", "w") as f:
        for photo in photos:
            num = photo.split("_")[2].split(".")[0]
            f.write(photo + ";" + num + "\n")
    photos = os.listdir("./test_set")
    with open("./preprocess_test.txt", "w") as f:
        for photo in photos:
            num = photo.split("_")[2].split(".")[0]
            f.write(photo + ";" + num + "\n")
```
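A small sketch to spot-check the converted data: each index line has the form `train_<index>_<label>.jpg;<label>`, so the first entry can be reloaded and compared against its label (illustrative only, not part of the original scripts):

```python
# Sanity check of the preprocessed output
import cv2

with open('./preprocess_train.txt', 'r') as f:
    name, label = f.readline().strip().split(';')

img = cv2.imread('./train_set/' + name, cv2.IMREAD_GRAYSCALE)
print(name, label, img.shape)  # expect a 28x28 image whose filename ends with the printed label
```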
Using the code above, training on all 60,000 samples for 10 epochs with a batch size of 128, and validating on all 10,000 test images, takes about 60 seconds in total on CUDA 10.1 + an RTX 2070; the test-set error rate is about 0.9%.
Next, the code above is modified, changing only one variable at a time relative to the original model (the corresponding one-line changes are sketched after this list):
(1). Change the pooling method: switching both pooling layers from average pooling to max pooling gives a test-set error rate of about 0.86%.
(2). Change the pooling layers' padding from 'valid' to 'same': no obvious change in the test-set error rate.
(3). Change the activation function of convolution layer 1 from ReLU to sigmoid: the test-set error rate converges faster, dropping to about 1.78% by the end of the second epoch (about 5% at the same stage with ReLU), but with sigmoid the final error rate essentially stops improving at about 1.45% (versus about 0.9% with ReLU).
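For reference, the three one-variable modifications correspond to drop-in changes in CNNmod.py along these lines (a sketch only; each experiment applies one change, shown here for the first convolution/pooling layer, with the second pooling layer changed analogously):

```python
# (1) Average pooling -> max pooling (both pooling layers)
pooling1 = keras.layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding='valid')(conv1)

# (2) Pooling padding 'valid' -> 'same' (both pooling layers)
pooling1 = keras.layers.AveragePooling2D(pool_size=[2, 2], strides=[2, 2], padding='same')(conv1)

# (3) ReLU -> sigmoid in convolution layer 1
conv1 = keras.layers.Conv2D(filters=64, kernel_size=[5, 5], strides=[2, 2],
                            activation=keras.activations.sigmoid,
                            use_bias=True, padding='same')(inputs)
```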
Reference: https://blog.csdn.net/weixin_41055137/article/details/81071226