GitHub blog link
CSDN blog link
└── keras-and-tensorflow-serving
    ├── README.md
    ├── my_image_classifier
    │   └── 1
    │       ├── saved_model.pb                  # exported model
    │       └── variables                       # exported model's variables folder
    │           ├── variables.data-00000-of-00001
    │           └── variables.index
    ├── test_images                             # test data
    │   ├── car.jpg
    │   └── car.png
    └── scripts                                 # our own model and scripts
        ├── download_inceptionv3_model.py
        ├── inception.h5
        ├── auto_cmd.py
        ├── export_saved_model.py
        ├── imagenet_class_index.json
        └── serving_sample_request.py
# Example:
from keras.applications.inception_v3 import InceptionV3  # import the InceptionV3 network structure
from keras.layers import Input                            # import the Input layer

# build the model and load the ImageNet weights
inception_model = InceptionV3(weights='imagenet', input_tensor=Input(shape=(224, 224, 3)))
inception_model.save('inception.h5')                      # save the network model as inception.h5
# Code:
import tensorflow as tf

tf.keras.backend.set_learning_phase(0)                # run the model in inference mode
model = tf.keras.models.load_model('./inception.h5')  # path of the model to load
export_path = '../my_image_classifier/1'              # the export path contains the model name and version

# Grab the Keras session and save the model.
# The signature definition is given by the input and output tensors.
with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_image': model.input},
        outputs={t.name: t for t in model.outputs})
# Directory structure:
├── my_image_classifier
│   └── 1
│       ├── saved_model.pb
│       └── variables
│           ├── variables.data-00000-of-00001
│           └── variables.index
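Before starting the server, it is worth checking that the export actually contains a usable serving signature. The snippet below is a minimal sketch (assuming the same TensorFlow 1.x environment used for the export; the path is the export directory from the previous step):

import tensorflow as tf

# Load the SavedModel back into a fresh session and print its serving signature.
# 'serve' is the tag that tf.saved_model.simple_save writes by default.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, ['serve'], '../my_image_classifier/1')
    print(meta_graph.signature_def['serving_default'])

The printed signature should list 'input_image' as the input and the model's output tensor, matching what the REST client sends later.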
Open a terminal and run the following command:
tensorflow_model_server --model_base_path=/home/******/PycharmProjects/tensorflow/deployment_testing/my_image_classifier --rest_api_port=9000 --model_name=detection

### --model_base_path: this must be an absolute path, otherwise you will get an error.
### --rest_api_port: TensorFlow Serving will start the gRPC ModelServer on port 8500, and the REST API will be available on port 9000.
### --model_name: this is the name of the serving server that you will send POST requests to. You can type any name here.
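After starting the server, you can check whether the model has loaded before sending any predictions. Recent TensorFlow Serving releases expose a model-status endpoint on the REST port; the sketch below assumes the port (9000) and model name (detection) from the command above:

import requests

# Query TensorFlow Serving's model status endpoint.
# The name in the URL must match the --model_name flag.
r = requests.get('http://localhost:9000/v1/models/detection')
print(r.json())  # the version state should be "AVAILABLE" once loading has finished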
Filename: serving_sample_request.py

import argparse
import json

import numpy as np
import requests
from keras.applications import inception_v3
from keras.preprocessing import image

# Argument parser for giving input image_path from command line
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path of the image")
args = vars(ap.parse_args())
image_path = args['image']

# Preprocessing our input image
img = image.img_to_array(image.load_img(image_path, target_size=(224, 224))) / 255.

# this line is added because of a bug in tf_serving(1.10.0-dev)
img = img.astype('float16')

payload = {
    "instances": [{'input_image': img.tolist()}]
}

# sending post request to TensorFlow Serving server
# (the model name in the URL must match the --model_name flag passed to tensorflow_model_server)
r = requests.post('http://localhost:9000/v1/models/ImageClassifier:predict', json=payload)
pred = json.loads(r.content.decode('utf-8'))

# Decoding the response
# decode_predictions(preds, top=5) by default gives top 5 results
# You can pass "top=10" to get top 10 predictions
print(json.dumps(inception_v3.decode_predictions(np.array(pred['predictions']))[0]))
python serving_sample_request.py -i ../test_images/car.png
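The "instances" list in the payload is not limited to one image: the REST predict API takes one entry per example, so several test images can be scored in a single request. The following is a sketch building on the same preprocessing as serving_sample_request.py (the file names are the ones under test_images, and the model name again has to match --model_name):

import json
import numpy as np
import requests
from keras.applications import inception_v3
from keras.preprocessing import image

def preprocess(path):
    # same preprocessing as serving_sample_request.py
    img = image.img_to_array(image.load_img(path, target_size=(224, 224))) / 255.
    return img.astype('float16').tolist()

# one entry in "instances" per image
payload = {"instances": [{'input_image': preprocess(p)}
                         for p in ('../test_images/car.jpg', '../test_images/car.png')]}
r = requests.post('http://localhost:9000/v1/models/ImageClassifier:predict', json=payload)
preds = np.array(json.loads(r.content.decode('utf-8'))['predictions'])
for name, top in zip(('car.jpg', 'car.png'), inception_v3.decode_predictions(preds)):
    print(name, top[:3])  # top-3 ImageNet classes for each image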