TensorFlow 2.0 Tutorial: Training Models with Keras

1. The typical workflow: build, train, and test a model

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Build the model
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])

# Load the data
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

# Hold out the last 10000 training samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

# Train the model
history = model.fit(x_train, y_train, batch_size=64, epochs=3,
                    validation_data=(x_val, y_val))
print('history:')
print(history.history)

# Evaluate on the test set and run predictions
result = model.evaluate(x_test, y_test, batch_size=128)
print('evaluate:')
print(result)
pred = model.predict(x_test[:2])
print('predict:')
print(pred)

2. Custom losses and metrics

To define a custom metric, simply subclass the Metric class and override the following methods:

__init__(self): initialization.

update_state(self, y_true, y_pred, sample_weight=None): uses the targets y_true and the model predictions y_pred to update the state variables.

result(self): uses the state variables to compute the final result.

reset_states(self): reinitializes the metric's state.

# A simple example showing how to implement a CatgoricalTruePostives metric
# that counts the number of samples correctly classified as belonging to a given class

class CatgoricalTruePostives(keras.metrics.Metric):
    def __init__(self, name='categorical_true_postives', **kwargs):
        super(CatgoricalTruePostives, self).__init__(name=name, **kwargs)
        self.true_postives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Reduce the probability vectors to predicted class indices
        y_pred = tf.argmax(y_pred, axis=-1)
        values = tf.equal(tf.cast(y_pred, tf.int32), tf.cast(y_true, tf.int32))
        values = tf.cast(values, tf.float32)

        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, tf.float32)
            values = tf.multiply(sample_weight, values)

        self.true_postives.assign_add(tf.reduce_sum(values))

    def result(self):
        return tf.identity(self.true_postives)

    def reset_states(self):
        self.true_postives.assign(0.)


model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[CatgoricalTruePostives()])

model.fit(x_train, y_train,
          batch_size=64, epochs=3)
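
Custom losses work analogously: in compile, loss accepts any callable that takes (y_true, y_pred) and returns per-sample loss values. As a minimal sketch, sparse categorical cross-entropy could be reimplemented by hand; the helper name basic_sparse_crossentropy is our own, not part of Keras:

# A hand-rolled sparse cross-entropy; a sketch, not the built-in implementation
def basic_sparse_crossentropy(y_true, y_pred):
    # Flatten labels to shape (batch,) and pick out, for each sample,
    # the predicted probability of the true class
    y_true = tf.reshape(tf.cast(y_true, tf.int32), [-1])
    probs = tf.gather(y_pred, y_true, batch_dims=1)
    # Small epsilon guards against log(0)
    return -tf.math.log(probs + 1e-7)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=basic_sparse_crossentropy,
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=64, epochs=1)
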
# Add a loss to the network by defining it in a custom layer
class ActivityRegularizationLayer(layers.Layer):
    def call(self, inputs):
        self.add_loss(tf.reduce_sum(inputs) * 0.1)
        return inputs

inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = ActivityRegularizationLayer()(h1)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=32, epochs=1)
# A metric to track can likewise be added by defining it in a custom layer
class MetricLoggingLayer(layers.Layer):
    def call(self, inputs):
        self.add_metric(keras.backend.std(inputs),
                        name='std_of_activation',
                        aggregation='mean')
        return inputs

inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = MetricLoggingLayer()(h1)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=32, epochs=1)
# Losses and metrics can also be added directly on the model
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h2 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h2)
model = keras.Model(inputs, outputs)

model.add_metric(keras.backend.std(inputs),
                 name='std_of_activation',
                 aggregation='mean')
model.add_loss(tf.reduce_sum(h1) * 0.1)

# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=32, epochs=1)

Besides passing validation data in with validation_data, you can also use validation_split to carve a validation set out of the training data.

PS: validation_split can only be used when training with NumPy data.

model.fit(x_train, y_train, batch_size=32, epochs=1, validation_split=0.2)

3. Building input pipelines with tf.data

def get_compiled_model():
    inputs = keras.Input(shape=(784,), name='mnist_input')
    h1 = layers.Dense(64, activation='relu')(inputs)
    h2 = layers.Dense(64, activation='relu')(h1)
    outputs = layers.Dense(10, activation='softmax')(h2)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer=keras.optimizers.RMSprop(),
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[keras.metrics.SparseCategoricalAccuracy()])
    return model

model = get_compiled_model()
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

# model.fit(train_dataset, epochs=3)
# steps_per_epoch: how many training steps to run per epoch
# validation_steps: how many steps to run for each validation pass
model.fit(train_dataset, epochs=3, steps_per_epoch=100,
          validation_data=val_dataset, validation_steps=3)

4. Sample weights and class weights

A "sample weights" array is an array of numbers that specifies how much weight each sample in a batch should carry when computing the total loss. It is commonly used for imbalanced classification problems (the idea being to give more weight to rarely-seen classes). When the weights used are ones and zeros, the array can serve as a mask for the loss function, entirely discarding some samples' contribution to the total loss (a sketch of this masking use follows the code below).

A "class weights" dict is a more specific instance of the same concept: it maps class indices to the sample weight that should be used for samples belonging to that class. For example, if class "0" is represented half as often as class "1" in your data, you could use class_weight = {0: 1., 1: 0.5}.

# Increase the weight of class 5
import numpy as np

# Class weights
model = get_compiled_model()
class_weight = {i: 1.0 for i in range(10)}
class_weight[5] = 2.0
print(class_weight)
model.fit(x_train, y_train,
          class_weight=class_weight,
          batch_size=64,
          epochs=4)

# Sample weights
model = get_compiled_model()
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
model.fit(x_train, y_train,
          sample_weight=sample_weight,
          batch_size=64,
          epochs=4)
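
As a sketch of the masking use mentioned above: weights of ones and zeros drop the masked samples from the loss entirely. Here, purely for illustration, all samples of class 0 are ignored:

# Mask out class 0 entirely (a weight of 0 removes those samples from the loss)
mask_weight = np.ones(shape=(len(y_train),))
mask_weight[y_train == 0] = 0.0

model = get_compiled_model()
model.fit(x_train, y_train,
          sample_weight=mask_weight,
          batch_size=64,
          epochs=1)
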

5. Multi-input, multi-output models

image_input = keras.Input(shape=(32, 32, 3), name='img_input')
timeseries_input = keras.Input(shape=(None, 10), name='ts_input')

x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)

x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)

x = layers.concatenate([x1, x2])

score_output = layers.Dense(1, name='score_output')(x)
class_output = layers.Dense(5, activation='softmax', name='class_output')(x)

model = keras.Model(inputs=[image_input, timeseries_input],
                    outputs=[score_output, class_output])
keras.utils.plot_model(model, 'multi_input_output_model.png',
                       show_shapes=True)
# Different losses and metrics can be specified for each output
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[keras.losses.MeanSquaredError(),
          keras.losses.CategoricalCrossentropy()])

# Loss weights can also be specified
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={'score_output': keras.losses.MeanSquaredError(),
          'class_output': keras.losses.CategoricalCrossentropy()},
    metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(),
                              keras.metrics.MeanAbsoluteError()],
             'class_output': [keras.metrics.CategoricalAccuracy()]},
    loss_weights={'score_output': 2., 'class_output': 1.})

# An output that should not contribute to training can be given a None loss
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[None, keras.losses.CategoricalCrossentropy()])

# Or the dict loss version
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={'class_output': keras.losses.CategoricalCrossentropy()})
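
A sketch of how data could be fed to this two-input, two-output model, using randomly generated NumPy arrays as stand-in data (the shapes and the sample count of 100 are our assumptions, chosen to match the Input definitions above; either ordered lists or dicts keyed by input/output names work):

import numpy as np

# Stand-in data matching the Input shapes above (100 samples, assumed)
img_data = np.random.random_sample(size=(100, 32, 32, 3)).astype('float32')
ts_data = np.random.random_sample(size=(100, 20, 10)).astype('float32')
score_targets = np.random.random_sample(size=(100, 1)).astype('float32')
class_targets = keras.utils.to_categorical(
    np.random.randint(5, size=(100,)), num_classes=5)

# Recompile with a loss for both outputs, then fit with dicts keyed by name
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={'score_output': keras.losses.MeanSquaredError(),
          'class_output': keras.losses.CategoricalCrossentropy()})
model.fit({'img_input': img_data, 'ts_input': ts_data},
          {'score_output': score_targets, 'class_output': class_targets},
          batch_size=32, epochs=1)
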

6. Using callbacks

Callbacks in Keras are objects that get called at various points during training (at the start of an epoch, at the end of a batch, at the end of an epoch, etc.) and can be used to implement behaviors such as:

Running validation at extra points during training (beyond the built-in per-epoch validation)
Checkpointing the model at regular intervals or once it exceeds a certain accuracy threshold
Changing the model's learning rate when training seems to be plateauing (see the ReduceLROnPlateau sketch in 6.1)
Fine-tuning the top layers when training seems to be plateauing
Sending email or instant-message notifications when training ends or a certain performance threshold is exceeded, and so on.

Built-in callbacks you can use include:

ModelCheckpoint: periodically saves the model.
EarlyStopping: stops training when it no longer improves the validation metrics.
TensorBoard: periodically writes model logs that can be visualized in TensorBoard (more details under "Visualization").
CSVLogger: streams loss and metric data to a CSV file (a sketch appears at the end of 6.1).
And more.

6.1 Callback usage

model = get_compiled_model()

callbacks = [
    keras.callbacks.EarlyStopping(
        # Stop training when `val_loss` is no longer improving
        monitor='val_loss',
        # "no longer improving" being defined as "no better than 1e-2 less"
        min_delta=1e-2,
        # "no longer improving" being further defined as "for at least 2 epochs"
        patience=2,
        verbose=1)
]
model.fit(x_train, y_train,
          epochs=20,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2)
# Model-checkpointing callback
model = get_compiled_model()
check_callback = keras.callbacks.ModelCheckpoint(
    filepath='mymodel_{epoch}.h5',
    save_best_only=True,
    monitor='val_loss',
    verbose=1
)

model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=[check_callback],
          validation_split=0.2)
# Dynamically adjusting the learning rate with a schedule
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=10000,
    decay_rate=0.96,
    staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
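
The schedule above decays on a fixed timetable. For the plateau-based adjustment mentioned in the callback list, the built-in ReduceLROnPlateau callback is one option; a minimal sketch, with parameter values that are arbitrary choices of ours:

# Halve the learning rate after 2 epochs without val_loss improvement
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.5,
    patience=2,
    verbose=1)

model = get_compiled_model()
model.fit(x_train, y_train,
          epochs=5,
          batch_size=64,
          callbacks=[reduce_lr],
          validation_split=0.2)
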
# Using TensorBoard
tensorboard_cbk = keras.callbacks.TensorBoard(log_dir='./full_path_to_your_logs')
model.fit(x_train, y_train,
          epochs=5,
          batch_size=64,
          callbacks=[tensorboard_cbk],
          validation_split=0.2)
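
CSVLogger, mentioned in the list above, works the same way; a minimal sketch (the file name training_log.csv is an arbitrary choice):

# Stream per-epoch loss and metric values to a CSV file
csv_logger = keras.callbacks.CSVLogger('training_log.csv', append=False)
model = get_compiled_model()
model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=[csv_logger],
          validation_split=0.2)
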

6.2 Creating your own callbacks

class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs.get('loss'))
        print('\nloss:', self.losses[-1])

model = get_compiled_model()

callbacks = [
    LossHistory()
]
model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2)

7. Writing your own training and evaluation loops

# Get the model.
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)

# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy()

# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Write the loop yourself
for epoch in range(3):
    print('epoch: ', epoch)
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape to record the forward pass, then compute gradients
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * 64))
# Train and validate
# Get the model
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)

# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy()

# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()

# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)


# Iterate over epochs.
for epoch in range(3):
    print('Start of epoch %d' % (epoch,))

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Update training metric.
        train_acc_metric(y_batch_train, logits)

        # Log every 200 batches.
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * 64))

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print('Training acc over epoch: %s' % (float(train_acc),))
    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_states()

    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val)
        # Update val metrics
        val_acc_metric(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print('Validation acc: %s' % (float(val_acc),))
# Adding your own losses: model.losses only holds the losses created
# during the most recent forward pass
class ActivityRegularizationLayer(layers.Layer):
    def call(self, inputs):
        self.add_loss(1e-2 * tf.reduce_sum(inputs))
        return inputs

inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)

model = keras.Model(inputs=inputs, outputs=outputs)
logits = model(x_train[:64])
print(model.losses)
logits = model(x_train[:64])
logits = model(x_train[64: 128])
logits = model(x_train[128: 192])
print(model.losses)
# Include these collected losses in the gradient computation
optimizer = keras.optimizers.SGD(learning_rate=1e-3)

for epoch in range(3):
    print('Start of epoch %d' % (epoch,))

    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)

            # Add extra losses created during this forward pass:
            loss_value += sum(model.losses)

        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Log every 200 batches.
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * 64))
