【1】 Overview of This Article
1. Analysis of the Keras workflow (model building, saving, loading, and inference; training-process visualization; model visualization; etc.)
2. Text data preprocessing with Keras
【2】 Environment Preparation
1. Download the dataset: http://ai.stanford.edu/~amaas/data/sentiment/
2. Install Graphviz; Keras relies on this component for model visualization: https://graphviz.gitlab.io/_pages/Download/Download_windows.html
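Besides the Graphviz binaries, plot_model also needs the pydot Python package (install it with pip install pydot, and make sure the Graphviz bin directory is on PATH). A small sketch to check the setup (this check is my own addition, not something Keras requires):

import shutil
import pydot  # pip install pydot

# plot_model calls the Graphviz "dot" executable through pydot, so it must be on PATH
assert shutil.which("dot") is not None, "Graphviz 'dot' executable not found on PATH"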
【3】 Data Preprocessing
After extracting the IMDB archive, preprocess the data:
1. Remove some words (stop words) from each review
2. Pair each review with its label
3. Map each review to integer ids and fix every review to the same length, so it can be used as fixed-length input data
# -*- coding:utf-8 -*-
import keras
import os
import numpy as np
import re
from keras.preprocessing import text
from keras.preprocessing import sequence
from keras.utils import plot_model
import matplotlib.pyplot as plt

Reg = re.compile(r'[A-Za-z]*')
stop_words = ['is', 'the', 'a']
max_features = 5000
word_embedding_size = 50
maxlen = 400
filters = 250
kernel_size = 3
hidden_dims = 250

def prepross(file):
    with open(file, encoding='utf-8') as f:
        data = f.readlines()
    data = Reg.findall(data[0])
    # Convert every word in the review to lower case
    data = [x.lower() for x in data]
    # Drop empty matches and words in the stop-word list
    data = [x for x in data if x != '' and x not in stop_words]
    # The return value must be a sentence (string), not a list of words
    return ' '.join(data)

def imdb_load(type):
    root_path = "E:/nlp_data/aclImdb_v1/aclImdb/"
    # Walk through all files
    file_lists = []
    pos_path = root_path + type + "/pos/"
    for f in os.listdir(pos_path):
        file_lists.append(pos_path + f)
    neg_path = root_path + type + "/neg/"
    for f in os.listdir(neg_path):
        file_lists.append(neg_path + f)
    # The first 12500 entries of file_lists are pos, the rest are neg;
    # labels must stay consistent with that order
    labels = [1 for i in range(12500)]
    labels.extend([0 for i in range(12500)])
    # Shuffle the files. Files and labels must be shuffled with the same
    # index permutation, otherwise they no longer correspond.
    index = np.arange(len(labels))
    np.random.shuffle(index)
    # Convert to numpy arrays
    labels = np.array(labels)
    file_lists = np.array(file_lists)
    labels = labels[index]
    file_lists = file_lists[index]
    # Process the files one by one
    sentenses = []
    for file in file_lists:
        # print(file)
        sentenses.append(prepross(file))
    return sentenses, labels

def imdb_load_data():
    x_train, y_train = imdb_load("train")
    x_test, y_test = imdb_load("test")
    # Build the word-to-index dictionary
    token = text.Tokenizer(num_words=max_features)
    token.fit_on_texts(x_train)
    # Map each review to a sequence of integers
    x_train = token.texts_to_sequences(x_train)
    x_test = token.texts_to_sequences(x_test)
    # Pad/truncate every review to a fixed number of words
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    return (x_train, y_train), (x_test, y_test)
【4】 Model Building and Training
def train():
    (x_train, y_train), (x_test, y_test) = imdb_load_data()
    model = keras.Sequential()
    # Word-embedding layer
    model.add(keras.layers.Embedding(input_dim=max_features,
                                     output_dim=word_embedding_size,
                                     name="embedding"))
    # Get layer information by layer name
    print(model.get_layer(name="embedding").input_shape)
    # 1D convolution over the stacked word vectors
    model.add(keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=1,
                                  activation=keras.activations.relu, name="conv1d"))
    # Global max pooling over each convolved feature map
    model.add(keras.layers.GlobalMaxPool1D(name="maxpool1d"))
    # Fully connected layer: input is 250-dim, output is hidden_dims
    model.add(keras.layers.Dense(units=hidden_dims, name="dense1"))
    # Activation layer
    model.add(keras.layers.Activation(activation=keras.activations.relu, name="relu1"))
    # Fully connected layer: binary classification, so the output dimension is 1
    model.add(keras.layers.Dense(units=1, name="dense2"))
    # Binary classification: use sigmoid as the classifier
    model.add(keras.layers.Activation(activation=keras.activations.sigmoid, name="sigmoid"))
    # Print per-layer information
    model.summary()
    # Compile the model: configure loss and optimizer
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.binary_crossentropy,
                  metrics=['accuracy'])
    # Train the model
    '''
    # To record the loss of every batch, pass a callback
    history = LossHistory()
    train_history = model.fit(x=x_train, y=y_train, batch_size=128, epochs=1,
                              validation_data=(x_test, y_test), callbacks=[history])
    show_train_history2(history)  # visualize the result
    '''
    # The log returned by fit has `epochs` entries, i.e. it only keeps the
    # values (loss, etc.) from the end of each epoch
    train_history = model.fit(x=x_train, y=y_train, batch_size=128, epochs=1,
                              validation_data=(x_test, y_test))
    show_train_history(train_history)
    # Save the model
    model.save(filepath="./models/demo_imdb_rnn.h5")
    # Save a picture of the model
    plot_model(model=model, to_file="./models/demo_imdb_rnn.png",
               show_layer_names=True, show_shapes=True)
【5】 Plotting the Loss Curve During Training
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))

def show_train_history2(history):
    plt.plot(history.losses, label='train loss')
    plt.title("model losses")
    plt.xlabel('batch')
    plt.ylabel('losses')
    plt.legend()
    # Save the figure before showing it, otherwise the saved image is blank
    plt.savefig("./models/demo_imdb_rnn_train.png")
    plt.show()

def show_train_history(train_history):
    print(train_history.history.keys())
    print(train_history.epoch)
    # Note: on newer Keras versions the keys are 'accuracy'/'val_accuracy'
    plt.plot(train_history.history['acc'], label='train')
    plt.plot(train_history.history['val_acc'], label='validation')
    plt.title("model accuracy")
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend()
    plt.show()
    plt.plot(train_history.history['loss'], label='train')
    plt.plot(train_history.history['val_loss'], label='validation')
    plt.title("model loss")
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend()
    plt.show()
【6】 Making Predictions with the Trained Model
def gen_predict_data(path):
    sent = prepross(path)
    # Re-fit the tokenizer on the training set so the word-to-index mapping
    # matches the one used during training
    x_train, y_train = imdb_load("train")
    token = text.Tokenizer(num_words=max_features)
    token.fit_on_texts(x_train)
    x = token.texts_to_sequences([sent])
    x = sequence.pad_sequences(x, maxlen=maxlen)
    return x

RESULT = {1: 'pos', 0: 'neg'}

def predict(path):
    x = gen_predict_data(path)
    model = keras.models.load_model("./models/demo_imdb_rnn.h5")
    y = model.predict(x)
    print(y)
    y = model.predict_classes(x)
    print(y)
    print(RESULT[y[0][0]])

predict(r"E:\nlp_data\aclImdb_v1\aclImdb\test\neg\0_2.txt")
predict(r"E:\nlp_data\aclImdb_v1\aclImdb\test\pos\0_10.txt")
The prediction results are as follows:
[[0.16223338]]
[[0]]
neg
[[0.8812848]]
[[1]]
pos
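Note that predict_classes only exists on the older Sequential API and has been removed in newer Keras/TensorFlow versions. For this sigmoid binary classifier, an equivalent (a sketch assuming the same model, x, and RESULT as in the predict function above) is to threshold the output of predict at 0.5:

# Equivalent of predict_classes for a single sigmoid output:
# probabilities above 0.5 are class 1 (pos), otherwise class 0 (neg)
y_prob = model.predict(x)
y_class = (y_prob > 0.5).astype("int32")
print(RESULT[int(y_class[0][0])])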
【7】 Complete Code
# -*- coding:utf-8 -*-
import keras
import os
import numpy as np
import re
from keras.preprocessing import text
from keras.preprocessing import sequence
from keras.utils import plot_model
import matplotlib.pyplot as plt

Reg = re.compile(r'[A-Za-z]*')
stop_words = ['is', 'the', 'a']
max_features = 5000
word_embedding_size = 50
maxlen = 400
filters = 250
kernel_size = 3
hidden_dims = 250

class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))

def prepross(file):
    with open(file, encoding='utf-8') as f:
        data = f.readlines()
    data = Reg.findall(data[0])
    # Convert every word in the review to lower case
    data = [x.lower() for x in data]
    # Drop empty matches and words in the stop-word list
    data = [x for x in data if x != '' and x not in stop_words]
    # The return value must be a sentence (string), not a list of words
    return ' '.join(data)

def imdb_load(type):
    root_path = "E:/nlp_data/aclImdb_v1/aclImdb/"
    # Walk through all files
    file_lists = []
    pos_path = root_path + type + "/pos/"
    for f in os.listdir(pos_path):
        file_lists.append(pos_path + f)
    neg_path = root_path + type + "/neg/"
    for f in os.listdir(neg_path):
        file_lists.append(neg_path + f)
    # The first 12500 entries of file_lists are pos, the rest are neg;
    # labels must stay consistent with that order
    labels = [1 for i in range(12500)]
    labels.extend([0 for i in range(12500)])
    # Shuffle the files. Files and labels must be shuffled with the same
    # index permutation, otherwise they no longer correspond.
    index = np.arange(len(labels))
    np.random.shuffle(index)
    # Convert to numpy arrays
    labels = np.array(labels)
    file_lists = np.array(file_lists)
    labels = labels[index]
    file_lists = file_lists[index]
    # Process the files one by one
    sentenses = []
    for file in file_lists:
        # print(file)
        sentenses.append(prepross(file))
    return sentenses, labels

def imdb_load_data():
    x_train, y_train = imdb_load("train")
    x_test, y_test = imdb_load("test")
    # Build the word-to-index dictionary
    token = text.Tokenizer(num_words=max_features)
    token.fit_on_texts(x_train)
    # Map each review to a sequence of integers
    x_train = token.texts_to_sequences(x_train)
    x_test = token.texts_to_sequences(x_test)
    # Pad/truncate every review to a fixed number of words
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    return (x_train, y_train), (x_test, y_test)

def train():
    (x_train, y_train), (x_test, y_test) = imdb_load_data()
    model = keras.Sequential()
    # Word-embedding layer
    model.add(keras.layers.Embedding(input_dim=max_features,
                                     output_dim=word_embedding_size,
                                     name="embedding"))
    # Get layer information by layer name
    print(model.get_layer(name="embedding").input_shape)
    # 1D convolution over the stacked word vectors
    model.add(keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=1,
                                  activation=keras.activations.relu, name="conv1d"))
    # Global max pooling over each convolved feature map
    model.add(keras.layers.GlobalMaxPool1D(name="maxpool1d"))
    # Fully connected layer: input is 250-dim, output is hidden_dims
    model.add(keras.layers.Dense(units=hidden_dims, name="dense1"))
    # Activation layer
    model.add(keras.layers.Activation(activation=keras.activations.relu, name="relu1"))
    # Fully connected layer: binary classification, so the output dimension is 1
    model.add(keras.layers.Dense(units=1, name="dense2"))
    # Binary classification: use sigmoid as the classifier
    model.add(keras.layers.Activation(activation=keras.activations.sigmoid, name="sigmoid"))
    # Print per-layer information
    model.summary()
    # Compile the model: configure loss and optimizer
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.binary_crossentropy,
                  metrics=['accuracy'])
    # Train the model
    '''
    # To record the loss of every batch, pass a callback
    history = LossHistory()
    train_history = model.fit(x=x_train, y=y_train, batch_size=128, epochs=1,
                              validation_data=(x_test, y_test), callbacks=[history])
    show_train_history2(history)  # visualize the result
    '''
    # The log returned by fit has `epochs` entries, i.e. it only keeps the
    # values (loss, etc.) from the end of each epoch
    train_history = model.fit(x=x_train, y=y_train, batch_size=128, epochs=10,
                              validation_data=(x_test, y_test))
    show_train_history(train_history)
    # Save the model
    model.save(filepath="./models/demo_imdb_rnn.h5")
    # Save a picture of the model
    plot_model(model=model, to_file="./models/demo_imdb_rnn.png",
               show_layer_names=True, show_shapes=True)

def show_train_history2(history):
    plt.plot(history.losses, label='train loss')
    plt.title("model losses")
    plt.xlabel('batch')
    plt.ylabel('losses')
    plt.legend()
    # Save the figure before showing it, otherwise the saved image is blank
    plt.savefig("./models/demo_imdb_rnn_train.png")
    plt.show()

def show_train_history(train_history):
    print(train_history.history.keys())
    print(train_history.epoch)
    # Note: on newer Keras versions the keys are 'accuracy'/'val_accuracy'
    plt.plot(train_history.history['acc'], label='train')
    plt.plot(train_history.history['val_acc'], label='validation')
    plt.title("model accuracy")
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend()
    plt.show()
    plt.plot(train_history.history['loss'], label='train')
    plt.plot(train_history.history['val_loss'], label='validation')
    plt.title("model loss")
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend()
    plt.show()

def gen_predict_data(path):
    sent = prepross(path)
    # Re-fit the tokenizer on the training set so the word-to-index mapping
    # matches the one used during training
    x_train, y_train = imdb_load("train")
    token = text.Tokenizer(num_words=max_features)
    token.fit_on_texts(x_train)
    x = token.texts_to_sequences([sent])
    x = sequence.pad_sequences(x, maxlen=maxlen)
    return x

RESULT = {1: 'pos', 0: 'neg'}

def predict(path):
    x = gen_predict_data(path)
    model = keras.models.load_model("./models/demo_imdb_rnn.h5")
    y = model.predict(x)
    print(y)
    y = model.predict_classes(x)
    print(y)
    print(RESULT[y[0][0]])

#train()
predict(r"E:\nlp_data\aclImdb_v1\aclImdb\test\neg\0_2.txt")
predict(r"E:\nlp_data\aclImdb_v1\aclImdb\test\pos\0_10.txt")
【8】 Result Comparison and Analysis
This article is mainly based on the Keras example (https://github.com/keras-team/keras/blob/master/examples/imdb_cnn.py), whose IMDB data is already preprocessed. Here the data is preprocessed from scratch instead; compared with the Keras example, the accuracy is essentially the same.
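For reference, the Keras example loads the already-tokenized IMDB data directly from keras.datasets instead of preprocessing raw text. A minimal sketch, using the same max_features and maxlen constants as above:

from keras.datasets import imdb
from keras.preprocessing import sequence

# Reviews are already mapped to integer ids; only the max_features most
# frequent words are kept
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# Pad/truncate every review to a fixed length, as done in this article
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)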
The PNG image of the Keras model (as saved by plot_model) looks as follows:
You can also use the tool Netron (https://github.com/lutzroeder/Netron) to open the .h5 model saved by Keras.
Netron is an excellent model-visualization tool; it can visualize Caffe, TensorFlow, Keras, and other model formats.
【9】 Visualization
Three kinds of visualization have been mentioned above: first, using a callback to record per-batch data such as the loss within a single epoch and plotting it as a curve, or using the history object to plot how the loss (and other metrics) changes over multiple epochs; second, saving the model as an image; third, inspecting the .h5 model with Netron. Here is a fourth way: using TensorBoard to display the training process and model parameters.
Using it is fairly simple: just pass a keras.callbacks.TensorBoard instance to the fit function as a callback.
tensorboard = keras.callbacks.TensorBoard(log_dir="./logs/")
train_history = model.fit(x=x_train, y=y_train, batch_size=128, epochs=1,
                          validation_data=(x_test, y_test), callbacks=[tensorboard])
After starting TensorBoard (tensorboard --logdir=./logs/), open http://localhost:6006 in a browser to see all kinds of information.
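If you also want weight histograms, the model graph, and weight images in TensorBoard, the callback accepts a few more options. A sketch (these are standard parameters of keras.callbacks.TensorBoard; the values here are just examples):

tensorboard = keras.callbacks.TensorBoard(log_dir="./logs/",
                                          histogram_freq=1,   # record weight histograms every epoch
                                          write_graph=True,   # write the model graph
                                          write_images=True)  # visualize layer weights as images
train_history = model.fit(x=x_train, y=y_train, batch_size=128, epochs=1,
                          validation_data=(x_test, y_test), callbacks=[tensorboard])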
【10】 About the Embedding Layer
As mentioned earlier, word vectors can be trained with word2vec, fastText, or gensim, yet the Embedding layer here does not seem to use any pretrained word vectors. The reason is that this embedding is itself trained as part of the whole pipeline. The parameters of the Embedding layer are explained as follows:
# Word-embedding layer
# input_dim    ----> maximum number of words in the dictionary, i.e. V
# output_dim   ----> dimension of the word vectors, i.e. m
# input_length ----> length of the input data x, i.e. the sentence length
#                    (how many words one sentence has). Sentences vary in
#                    length, which is why they were truncated/padded earlier.
model.add(keras.layers.Embedding(input_dim=max_features,
                                 output_dim=word_embedding_size,
                                 name="embedding"))
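A small standalone sketch (my own example, not part of the article's model) showing how these parameters relate to tensor shapes: an integer input of shape (batch, input_length) comes out of the Embedding layer with shape (batch, input_length, output_dim):

import numpy as np
import keras

# V = 5000 dictionary words, m = 50-dimensional vectors, sentences padded
# to 400 words, matching the constants used in this article
emb = keras.Sequential([
    keras.layers.Embedding(input_dim=5000, output_dim=50, input_length=400)
])
fake_batch = np.random.randint(0, 5000, size=(2, 400))  # 2 sentences of 400 word ids
print(emb.predict(fake_batch).shape)                     # (2, 400, 50)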
What if you want to use word vectors pretrained with fastText? That is easy to do: it is essentially a fine-tuning process, except that for the network above only the embedding layer is fine-tuned.
It breaks down into the following three steps:
1. Obtain the pretrained word vectors and parse them into a map or dict, where key = token and value = word vector (a V*M table).
2. After preprocessing the training corpus (e.g. IMDB), look each word up in that map to get its vector, which yields the word vectors of the current corpus (V1*M). Note that the word-vector size is still M; only the dictionary size changes to V1. If a word of the current corpus is not in the pretrained dictionary, its vector can be randomly initialized.
3. Fill the word embeddings of the current corpus into the parameters of the Embedding layer.
Here the word embeddings trained by Stanford with GloVe are used as an example.
Download: https://nlp.stanford.edu/projects/glove/
The code is as follows:
# The dictionary size V and word-vector dimension m correspond to the
# max_features and word_embedding_size constants used earlier
V = max_features
m = word_embedding_size

# Initialize the embedding matrix and lookup dictionaries
embedding_matrix = np.zeros(shape=(V, m))
word_index = {}
embedding_index = {}
# Use the pretrained vectors with m=50; store every pretrained word and its
# vector in embedding_index
with open("glove.6B.50d.txt") as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype=np.float32)
        embedding_index[word] = coefs
'''
x_train, y_train = imdb_load("train")
token = text.Tokenizer(num_words=max_features)
token.fit_on_texts(x_train)
'''
# Get the words of the current corpus (imdb)
word_index = token.word_index
not_find = 0
for word, i in word_index.items():
    if i < V:
        # Look the word up in the pretrained vocabulary
        embedding_vec = embedding_index.get(word)
        if embedding_vec is not None:
            embedding_matrix[i] = embedding_vec
        else:
            not_find += 1
# Set the weights of the embedding layer
model.layers[0].set_weights([embedding_matrix])
# Freeze the embedding layer; it can also be left unfrozen, in which case
# the layer itself is fine-tuned during training
model.layers[0].trainable = False