1. First, let's get to know the Embedding layer in Keras: from keras.layers.embeddings import Embedding
The Embedding layer's input and output shapes are as follows:
Input shape: (batch_size, input_length)
Output shape: (batch_size, input_length, output_dim)
An example (randomly initialized Embedding):
from keras.models import Sequential
from keras.layers import Embedding
import numpy as np

model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# Input shape: (None, 10), where None is the batch_size and 10 is the length of each input sequence
# Output shape: (None, 10, 64): each token in a sequence is embedded into a 64-dimensional vector

input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
print(output_array)
assert output_array.shape == (32, 10, 64)
For more detail, look at the following example:
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
import numpy as np

model = Sequential()
model.add(Embedding(3, 2, input_length=7))
Informally, in this step the Embedding layer generates a random matrix of size 3×2 (3 is the vocabulary size, 2 is the dimension of each word's embedding vector). Call it M and inspect it:
model.layers[0].get_weights()
# Output:
# [array([[-0.00732628, -0.02913231],
#         [ 0.00573028,  0.0329752 ],
#         [-0.0401206 , -0.01729034]], dtype=float32)]
Each row of the matrix is the numeric vector for the token whose index is that row number, i.e. row i (i = 0, 1, 2) of M is the vector for the word with index i. For example, if an input token has index 1, its embedding vector is [0.00573028, 0.0329752]. See below:
data = np.array([[0, 1, 2, 1, 1, 0, 1],
                 [0, 1, 2, 1, 1, 0, 1]])
print(model.predict(data))
# Output:
# [[[-0.00732628 -0.02913231]
#   [ 0.00573028  0.0329752 ]
#   [-0.0401206  -0.01729034]
#   [ 0.00573028  0.0329752 ]
#   [ 0.00573028  0.0329752 ]
#   [-0.00732628 -0.02913231]
#   [ 0.00573028  0.0329752 ]]
#
#  [[-0.00732628 -0.02913231]
#   [ 0.00573028  0.0329752 ]
#   [-0.0401206  -0.01729034]
#   [ 0.00573028  0.0329752 ]
#   [ 0.00573028  0.0329752 ]
#   [-0.00732628 -0.02913231]
#   [ 0.00573028  0.0329752 ]]]
data is the input to the Embedding layer: a batch of 2 samples, each a sequence of 7 token indices, i.e. data.shape = (2, 7). The output has shape (2, 7, 2): every token has been embedded into a 2-dimensional vector.
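This confirms that the Embedding layer is simply a table lookup into M. As a quick sanity check (reusing the model and data from above), numpy fancy indexing on the weight matrix reproduces the same output:

# the Embedding layer is equivalent to indexing rows of its weight matrix
M = model.layers[0].get_weights()[0]   # shape (3, 2)
manual_lookup = M[data]                # fancy indexing: shape (2, 7, 2)
assert np.allclose(manual_lookup, model.predict(data))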
Sometimes we want to initialize the Embedding layer with a pre-trained embedding matrix (here, word2vec vectors trained on the Baidu Baike corpus):
from keras.models import Model
from keras.layers import Input, Embedding
from keras.initializers import Constant
import numpy as np

def create_embedding(word_index, num_words, word2vec_model):
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    for word, i in word_index.items():
        try:
            embedding_matrix[i] = word2vec_model[word]
        except KeyError:
            # words missing from the word2vec vocabulary keep an all-zero vector
            continue
    return embedding_matrix

# word_index: dictionary mapping each word to its integer index
# num_words: dictionary size + 1
# word2vec_model: the pre-trained word-vector model
embedding_matrix = create_embedding(word_index, num_words, word2vec_model)

embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,                                      # dimension of the embedding vectors
                            embeddings_initializer=Constant(embedding_matrix),  # initialize with the pre-trained matrix
                            input_length=MAX_SEQUENCE_LENGTH,                   # length of each input sequence
                            trainable=False)                                    # freeze the pre-trained weights
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
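For context, the word_index and word2vec_model used above might be produced as follows. This is a minimal sketch: the corpus texts and the vector-file path are hypothetical placeholders.

from keras.preprocessing.text import Tokenizer
from gensim.models import KeyedVectors

texts = ["a list of training sentences"]   # hypothetical corpus
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
word_index = tokenizer.word_index          # word -> index (indices start at 1)
num_words = len(word_index) + 1            # +1 because index 0 is reserved for padding

# load pre-trained word2vec vectors (file path is a placeholder)
word2vec_model = KeyedVectors.load_word2vec_format('baike_word2vec.bin', binary=True)
EMBEDDING_DIM = word2vec_model.vector_size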
In fact, implementing an LSTM in Keras (the same goes for other network architectures) is like stacking building blocks:
# Single-layer LSTM
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dropout, Dense, Activation

model = Sequential()
model.add(Embedding(len(words) + 1, 256, input_length=maxlen))
model.add(LSTM(128, activation='sigmoid', recurrent_activation='hard_sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x, y, batch_size=16, epochs=10)
y_ = model.predict_classes(x)
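The block above assumes words, maxlen, x, and y already exist. A minimal stand-in, to be defined beforehand so the example runs end to end (all values hypothetical):

import numpy as np

maxlen = 100
words = range(5000)                                           # stand-in vocabulary
x = np.random.randint(1, len(words) + 1, size=(64, maxlen))   # 64 fake token sequences
y = np.random.randint(0, 2, size=(64, 1))                     # binary labels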
# Multi-layer (stacked) LSTM
model = Sequential()
# In a stacked LSTM, return_sequences is usually False for the last LSTM layer and True for every earlier layer
# return_sequences: defaults to False. When False, the layer returns only the hidden state of the last
# time step; when True, it returns the hidden states of all time steps
model.add(LSTM(layers[1], input_shape=(seq_len, layers[0]), return_sequences=True))
# model.add(Dropout(0.2))
model.add(LSTM(layers[2], return_sequences=False))
# model.add(Dropout(0.2))
model.add(Dense(units=layers[3], activation='tanh'))
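Here layers and seq_len are assumed to be defined elsewhere; hypothetical values that make the block above concrete, to be set beforehand:

# hypothetical configuration: 1 input feature, two LSTM layers of 50 and 100 units, 1 output
layers = [1, 50, 100, 1]
seq_len = 10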
Below is a detailed explanation of the difference between the LSTM arguments return_sequences and return_state in Keras:
return_sequences: defaults to False. When False, the layer returns only the hidden state of the last time step; when True, it returns the hidden states of all time steps.
return_state: defaults to False. When True, the layer additionally returns the hidden state and the cell state of the last time step.
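Put together, the four combinations return the following (shapes for a single LSTM layer with units hidden units; a summary of the behavior demonstrated in the examples below):

# return_sequences=False, return_state=False -> output           # shape (batch, units)
# return_sequences=True,  return_state=False -> outputs          # shape (batch, steps, units)
# return_sequences=False, return_state=True  -> [output, h, c]   # output == h
# return_sequences=True,  return_state=True  -> [outputs, h, c]  # outputs[:, -1] == h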
The input in the examples below is an array with 3 time steps and 1 feature per step.
The network has 2 LSTM layers (the first must set return_sequences=True so that its output becomes a 3-step input sequence for the second layer).
from keras.models import Model
from keras.layers import Input, LSTM
from numpy import array

data = array([0.1, 0.2, 0.3]).reshape((1, 3, 1))
inputs1 = Input(shape=(3, 1))
lstm1, state_h, state_c = LSTM(2, return_sequences=True, return_state=True)(inputs1)  # first LSTM layer
lstm2 = LSTM(2, return_sequences=True)(lstm1)                                         # second LSTM layer
model = Model(inputs=inputs1, outputs=[lstm2])
print(model.predict(data))
Output (the hidden state of the last layer, lstm2, at every time step):
[[[0.00120299 0.0009285]
[0.0040868 0.00327]
[0.00869473 0.00720878]]]
from keras.models import Model
from keras.layers import Input, LSTM
from numpy import array

data = array([0.1, 0.2, 0.3]).reshape((1, 3, 1))
inputs1 = Input(shape=(3, 1))
lstm1, state_h, state_c = LSTM(2, return_sequences=True, return_state=True)(inputs1)
lstm2, state_h2, state_c2 = LSTM(2, return_state=True)(lstm1)
model = Model(inputs=inputs1, outputs=[lstm2, state_h2, state_c2])
print(model.predict(data))
Output: because return_state=True, the model returns the hidden state and the cell state of the last time step of the last layer. Since return_sequences is False here, lstm2 is itself that final hidden state, which is why the first two arrays below are identical; the third array is the final cell state.
[array([[-0.00234587,  0.00718377]], dtype=float32),
 array([[-0.00234587,  0.00718377]], dtype=float32),
 array([[-0.00476015,  0.01406127]], dtype=float32)]
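A quick check of that identity, reusing the model and data from the block above:

import numpy as np

out, h, c = model.predict(data)
# with return_sequences=False and return_state=True, the layer's output IS the final hidden state
assert np.allclose(out, h)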
from keras.models import Model
from keras.layers import Input, LSTM
from numpy import array

data = array([0.1, 0.2, 0.3]).reshape((1, 3, 1))
inputs1 = Input(shape=(3, 1))
lstm1, state_h, state_c = LSTM(2, return_sequences=True, return_state=True)(inputs1)
lstm2, state_h2, state_c2 = LSTM(2, return_sequences=True, return_state=True)(lstm1)
model = Model(inputs=inputs1, outputs=[lstm2, state_h2, state_c2])
print(model.predict(data))
Output: the hidden states of all time steps of the last layer, followed by that layer's final hidden state and cell state.
[array([[[-2.0248523e-04, -1.0290105e-03],
         [-3.6455912e-04, -3.3424206e-03],
         [-3.6696041e-05, -6.6624139e-03]]], dtype=float32),
 array([[-3.669604e-05, -6.662414e-03]], dtype=float32),
 array([[-7.3107367e-05, -1.3788906e-02]], dtype=float32)]
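Likewise, the last time step of the returned sequence equals the returned final hidden state (reusing the model and data from the block above):

import numpy as np

seq, h, c = model.predict(data)
# the final row of the full hidden-state sequence is the final hidden state
assert np.allclose(seq[:, -1, :], h)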
from keras.models import Model
from keras.layers import Input, LSTM
from numpy import array

data = array([0.1, 0.2, 0.3]).reshape((1, 3, 1))
inputs1 = Input(shape=(3, 1))
lstm1, state_h, state_c = LSTM(2, return_sequences=True, return_state=True)(inputs1)
lstm2 = LSTM(2)(lstm1)
model = Model(inputs=inputs1, outputs=[lstm2])
print(model.predict(data))
Output: the hidden state of the last time step of the last layer.
[[-0.01998264 -0.00451741]]
References:
https://www.jianshu.com/p/a3f3033a7379
https://blog.csdn.net/qq_33472765/article/details/86561245
https://www.wandouip.com/t5i152855/
https://blog.csdn.net/weixin_36541072/article/details/53786020