The goal of the Titanic dataset is to predict, from passenger information, whether a passenger survived after the Titanic struck an iceberg and sank.
Structured data like this is usually preprocessed with pandas DataFrames.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import models, layers

dftrain_raw = pd.read_csv('./data/titanic/train.csv')
dftest_raw = pd.read_csv('./data/titanic/test.csv')
dftrain_raw.head(10)
Field descriptions (the standard Titanic fields):
Survived: 0 = did not survive, 1 = survived (the label)
Pclass: ticket class (1, 2, 3)
Name: passenger name
Sex: passenger sex
Age: age in years
SibSp: number of siblings/spouses aboard
Parch: number of parents/children aboard
Ticket: ticket number
Fare: passenger fare
Cabin: cabin number
Embarked: port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)
With pandas' built-in plotting we can do some simple exploratory data analysis (EDA).
Label distribution:
%matplotlib inline
%config InlineBackend.figure_format = 'png'
ax = dftrain_raw['Survived'].value_counts().plot(kind='bar',
     figsize=(12, 8), fontsize=15, rot=0)
ax.set_ylabel('Counts', fontsize=15)
ax.set_xlabel('Survived', fontsize=15)
plt.show()
Age distribution:
%matplotlib inline
%config InlineBackend.figure_format = 'png'
ax = dftrain_raw['Age'].plot(kind='hist', bins=20, color='purple',
     figsize=(12, 8), fontsize=15)
ax.set_ylabel('Frequency', fontsize=15)
ax.set_xlabel('Age', fontsize=15)
plt.show()
Correlation between age and the label:
%matplotlib inline
%config InlineBackend.figure_format = 'png'
ax = dftrain_raw.query('Survived == 0')['Age'].plot(kind='density',
     figsize=(12, 8), fontsize=15)
dftrain_raw.query('Survived == 1')['Age'].plot(kind='density',
     figsize=(12, 8), fontsize=15)
ax.legend(['Survived==0', 'Survived==1'], fontsize=12)
ax.set_ylabel('Density', fontsize=15)
ax.set_xlabel('Age', fontsize=15)
plt.show()
Below is the actual data preprocessing.
def preprocessing(dfdata):
    dfresult = pd.DataFrame()

    # Pclass: one-hot encode
    dfPclass = pd.get_dummies(dfdata['Pclass'])
    dfPclass.columns = ['Pclass_' + str(x) for x in dfPclass.columns]
    dfresult = pd.concat([dfresult, dfPclass], axis=1)

    # Sex: one-hot encode
    dfSex = pd.get_dummies(dfdata['Sex'])
    dfresult = pd.concat([dfresult, dfSex], axis=1)

    # Age: fill missing values with 0 and add a missing-value indicator
    dfresult['Age'] = dfdata['Age'].fillna(0)
    dfresult['Age_null'] = pd.isna(dfdata['Age']).astype('int32')

    # SibSp, Parch, Fare: use as-is
    dfresult['SibSp'] = dfdata['SibSp']
    dfresult['Parch'] = dfdata['Parch']
    dfresult['Fare'] = dfdata['Fare']

    # Cabin: keep only a missing-value indicator
    dfresult['Cabin_null'] = pd.isna(dfdata['Cabin']).astype('int32')

    # Embarked: one-hot encode, with NaN as its own category
    dfEmbarked = pd.get_dummies(dfdata['Embarked'], dummy_na=True)
    dfEmbarked.columns = ['Embarked_' + str(x) for x in dfEmbarked.columns]
    dfresult = pd.concat([dfresult, dfEmbarked], axis=1)

    return dfresult

x_train = preprocessing(dftrain_raw)
y_train = dftrain_raw['Survived'].values
x_test = preprocessing(dftest_raw)
y_test = dftest_raw['Survived'].values

print("x_train.shape =", x_train.shape)
print("x_test.shape =", x_test.shape)
x_train.shape = (712, 15)
x_test.shape = (179, 15)
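One caveat with this approach: pd.get_dummies builds its dummy columns from whatever category values appear in the frame it is given, so if the test split happened to be missing a category present in the training split, x_train and x_test could end up with different columns. Here both come out as 15 columns, but a defensive sketch (the reindex call is our own addition, not part of the original pipeline) would be:

# align the test features to the training columns, filling any missing dummies with 0
x_test = x_test.reindex(columns=x_train.columns, fill_value=0)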
There are three ways to build a model with the Keras API: use Sequential to build the model layer by layer, use the functional API to build models of arbitrary structure, or subclass the Model base class to build a fully customized model.
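For reference, the other two styles look roughly like the sketch below; the network shown matches the Sequential model built in the next cell, and the names model_fn and TitanicModel are our own illustrative choices.

# functional API: same 15-feature input and layer sizes as the Sequential model below
inputs = tf.keras.Input(shape=(15,))
x = layers.Dense(20, activation='relu')(inputs)
x = layers.Dense(10, activation='relu')(x)
outputs = layers.Dense(1, activation='sigmoid')(x)
model_fn = tf.keras.Model(inputs=inputs, outputs=outputs)

# Model subclassing: the same network as a custom class
class TitanicModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense1 = layers.Dense(20, activation='relu')
        self.dense2 = layers.Dense(10, activation='relu')
        self.out = layers.Dense(1, activation='sigmoid')

    def call(self, inputs):
        return self.out(self.dense2(self.dense1(inputs)))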
Here we choose the simplest option: a Sequential model built layer by layer.
tf.keras.backend.clear_session()

model = models.Sequential()
model.add(layers.Dense(20, activation='relu', input_shape=(15,)))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 20) 320 _________________________________________________________________ dense_1 (Dense) (None, 10) 210 _________________________________________________________________ dense_2 (Dense) (None, 1) 11 ================================================================= Total params: 541 Trainable params: 541 Non-trainable params: 0 _________________________________________________________________
There are generally three ways to train a model: the built-in fit method, the built-in train_on_batch method, or a custom training loop. Here we choose the most common and simplest one, the built-in fit method.
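For comparison, training with train_on_batch would look roughly like the helper below; this is a hypothetical sketch, assuming the model has already been compiled the way the next cell does.

import numpy as np

def fit_with_train_on_batch(model, x, y, batch_size=64, epochs=30):
    # manual epoch/batch loop; x is the preprocessed DataFrame, y a numpy array
    x_values = x.values.astype('float32')
    n = len(x_values)
    for epoch in range(epochs):
        indices = np.random.permutation(n)
        for start in range(0, n, batch_size):
            batch = indices[start:start + batch_size]
            model.train_on_batch(x_values[batch], y[batch])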
# binary classification: use the binary cross-entropy loss
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['AUC'])

history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=30,
                    validation_split=0.2  # hold out part of the training data for validation
                    )
Train on 569 samples, validate on 143 samples
Epoch 1/30
569/569 [==============================] - 1s 2ms/sample - loss: 3.5841 - AUC: 0.4079 - val_loss: 3.4429 - val_AUC: 0.4129
Epoch 2/30
569/569 [==============================] - 0s 102us/sample - loss: 2.6093 - AUC: 0.3967 - val_loss: 2.4886 - val_AUC: 0.4139
Epoch 3/30
569/569 [==============================] - 0s 68us/sample - loss: 1.8375 - AUC: 0.4003 - val_loss: 1.7383 - val_AUC: 0.4223
Epoch 4/30
569/569 [==============================] - 0s 83us/sample - loss: 1.2545 - AUC: 0.4390 - val_loss: 1.1936 - val_AUC: 0.4765
Epoch 5/30
569/569 [==============================] - 0s 90us/sample - loss: 0.9141 - AUC: 0.5192 - val_loss: 0.8274 - val_AUC: 0.5584
Epoch 6/30
569/569 [==============================] - 0s 110us/sample - loss: 0.7052 - AUC: 0.6290 - val_loss: 0.6596 - val_AUC: 0.6880
Epoch 7/30
569/569 [==============================] - 0s 90us/sample - loss: 0.6410 - AUC: 0.7086 - val_loss: 0.6519 - val_AUC: 0.6845
Epoch 8/30
569/569 [==============================] - 0s 93us/sample - loss: 0.6246 - AUC: 0.7080 - val_loss: 0.6480 - val_AUC: 0.6846
Epoch 9/30
569/569 [==============================] - 0s 73us/sample - loss: 0.6088 - AUC: 0.7113 - val_loss: 0.6497 - val_AUC: 0.6838
Epoch 10/30
569/569 [==============================] - 0s 79us/sample - loss: 0.6051 - AUC: 0.7117 - val_loss: 0.6454 - val_AUC: 0.6873
Epoch 11/30
569/569 [==============================] - 0s 96us/sample - loss: 0.5972 - AUC: 0.7218 - val_loss: 0.6369 - val_AUC: 0.6888
Epoch 12/30
569/569 [==============================] - 0s 92us/sample - loss: 0.5918 - AUC: 0.7294 - val_loss: 0.6330 - val_AUC: 0.6908
Epoch 13/30
569/569 [==============================] - 0s 75us/sample - loss: 0.5864 - AUC: 0.7363 - val_loss: 0.6281 - val_AUC: 0.6948
Epoch 14/30
569/569 [==============================] - 0s 104us/sample - loss: 0.5832 - AUC: 0.7426 - val_loss: 0.6240 - val_AUC: 0.7030
Epoch 15/30
569/569 [==============================] - 0s 74us/sample - loss: 0.5777 - AUC: 0.7507 - val_loss: 0.6200 - val_AUC: 0.7066
Epoch 16/30
569/569 [==============================] - 0s 79us/sample - loss: 0.5726 - AUC: 0.7569 - val_loss: 0.6155 - val_AUC: 0.7132
Epoch 17/30
569/569 [==============================] - 0s 99us/sample - loss: 0.5674 - AUC: 0.7643 - val_loss: 0.6070 - val_AUC: 0.7255
Epoch 18/30
569/569 [==============================] - 0s 97us/sample - loss: 0.5631 - AUC: 0.7721 - val_loss: 0.6061 - val_AUC: 0.7305
Epoch 19/30
569/569 [==============================] - 0s 73us/sample - loss: 0.5580 - AUC: 0.7792 - val_loss: 0.6027 - val_AUC: 0.7332
Epoch 20/30
569/569 [==============================] - 0s 85us/sample - loss: 0.5533 - AUC: 0.7861 - val_loss: 0.5997 - val_AUC: 0.7366
Epoch 21/30
569/569 [==============================] - 0s 87us/sample - loss: 0.5497 - AUC: 0.7926 - val_loss: 0.5961 - val_AUC: 0.7433
Epoch 22/30
569/569 [==============================] - 0s 101us/sample - loss: 0.5454 - AUC: 0.7987 - val_loss: 0.5943 - val_AUC: 0.7438
Epoch 23/30
569/569 [==============================] - 0s 100us/sample - loss: 0.5398 - AUC: 0.8057 - val_loss: 0.5926 - val_AUC: 0.7492
Epoch 24/30
569/569 [==============================] - 0s 79us/sample - loss: 0.5328 - AUC: 0.8122 - val_loss: 0.5912 - val_AUC: 0.7493
Epoch 25/30
569/569 [==============================] - 0s 86us/sample - loss: 0.5283 - AUC: 0.8147 - val_loss: 0.5902 - val_AUC: 0.7509
Epoch 26/30
569/569 [==============================] - 0s 67us/sample - loss: 0.5246 - AUC: 0.8196 - val_loss: 0.5845 - val_AUC: 0.7552
Epoch 27/30
569/569 [==============================] - 0s 72us/sample - loss: 0.5205 - AUC: 0.8271 - val_loss: 0.5837 - val_AUC: 0.7584
Epoch 28/30
569/569 [==============================] - 0s 74us/sample - loss: 0.5144 - AUC: 0.8302 - val_loss: 0.5848 - val_AUC: 0.7561
Epoch 29/30
569/569 [==============================] - 0s 77us/sample - loss: 0.5099 - AUC: 0.8326 - val_loss: 0.5809 - val_AUC: 0.7583
Epoch 30/30
569/569 [==============================] - 0s 80us/sample - loss: 0.5071 - AUC: 0.8349 - val_loss: 0.5816 - val_AUC: 0.7605
Let's first evaluate how the model performed on the training and validation sets.
%matplotlib inline
%config InlineBackend.figure_format = 'svg'

import matplotlib.pyplot as plt

def plot_metric(history, metric):
    train_metrics = history.history[metric]
    val_metrics = history.history['val_' + metric]
    epochs = range(1, len(train_metrics) + 1)
    plt.plot(epochs, train_metrics, 'bo--')
    plt.plot(epochs, val_metrics, 'ro-')
    plt.title('Training and validation ' + metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend(["train_" + metric, 'val_' + metric])
    plt.show()
plot_metric(history,"loss")
plot_metric(history,"AUC")
Now let's look at how the model performs on the test set.
model.evaluate(x=x_test, y=y_test)
[0.5191367897907448, 0.8122605]
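The two numbers are the loss and the AUC metric, in the order given by model.metrics_names; pairing them up makes the output self-describing (a small sketch, not in the original):

# label each evaluation result with its metric name
dict(zip(model.metrics_names, model.evaluate(x=x_test, y=y_test, verbose=0)))
# e.g. {'loss': 0.519..., 'AUC': 0.812...}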
# predicted probabilities
model.predict(x_test[0:10])
# model(tf.constant(x_test[0:10].values, dtype=tf.float32))  # equivalent call
array([[0.26501188],
       [0.40970832],
       [0.44285864],
       [0.78408605],
       [0.47650957],
       [0.43849158],
       [0.27426785],
       [0.5962582 ],
       [0.59476686],
       [0.17882936]], dtype=float32)
# predicted classes
model.predict_classes(x_test[0:10])
array([[0],
       [0],
       [0],
       [1],
       [0],
       [0],
       [0],
       [1],
       [1],
       [0]], dtype=int32)
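Note that predict_classes is specific to Sequential models and was removed in later TensorFlow releases; thresholding the predicted probabilities yourself gives the same result for a sigmoid output (0.5 is the conventional cutoff):

# equivalent class prediction via an explicit 0.5 threshold
(model.predict(x_test[0:10]) > 0.5).astype('int32')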
The model can be saved in the Keras format or in TensorFlow's native format. The former can only be restored from a Python environment, while the latter supports cross-platform model deployment.
The latter is the recommended way to save models.
1. Saving in the Keras format
# save the model structure and weights
model.save('./data/keras_model.h5')

del model  # delete the existing model

# identical to the previous one
model = models.load_model('./data/keras_model.h5')
model.evaluate(x_test, y_test)
[0.5191367897907448, 0.8122605]
# save the model structure
json_str = model.to_json()

# restore the model structure
model_json = models.model_from_json(json_str)
# save the model weights
model.save_weights('./data/keras_model_weight.h5')

# restore the model structure
model_json = models.model_from_json(json_str)
model_json.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['AUC']
)

# load the weights
model_json.load_weights('./data/keras_model_weight.h5')
model_json.evaluate(x_test, y_test)
[0.5191367897907448, 0.8122605]
2. Saving in TensorFlow's native format
# save the weights; this approach saves only the weight tensors
model.save_weights('./data/tf_model_weights.ckpt', save_format="tf")
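The checkpoint stores weights only, so to restore them you must first rebuild an identical architecture and then load the weights into it; a minimal sketch (model_ckpt is our own name):

# rebuild the same architecture, then load the checkpointed weight tensors
model_ckpt = models.Sequential([
    layers.Dense(20, activation='relu', input_shape=(15,)),
    layers.Dense(10, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])
model_ckpt.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])
model_ckpt.load_weights('./data/tf_model_weights.ckpt')
model_ckpt.evaluate(x_test, y_test)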
# save the model structure and parameters to a file;
# a model saved this way is cross-platform and convenient to deploy
model.save('./data/tf_model_savedmodel', save_format="tf")
print('export saved model.')

model_loaded = tf.keras.models.load_model('./data/tf_model_savedmodel')
model_loaded.evaluate(x_test, y_test)
[0.5191365896656527, 0.8122605]
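Because the SavedModel carries its own computation graph and serving signatures, it can also be loaded without any Keras code, which is what cross-platform runtimes such as TensorFlow Serving rely on. A quick sketch of inspecting it from Python:

# load as a generic SavedModel and list its serving signatures
loaded = tf.saved_model.load('./data/tf_model_savedmodel')
print(list(loaded.signatures.keys()))  # typically ['serving_default']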