This article is an analysis and walkthrough of the end_to_end_tensorflow_mnist sample that ships with TensorRT 5.0.2.
Assume the current path is:
TensorRT-5.0.2.6/samples
The directory tree for this sample is:
```
# tree python
python
├── common.py
├── end_to_end_tensorflow_mnist
│   ├── model.py
│   ├── README.md
│   ├── requirements.txt
│   └── sample.py
```
Only two of these files matter here:
- model.py: contains a simple model-training script
- sample.py: uses the UFF MNIST model to build a TensorRT inference engine
Let's start with model.py:
```python
# This script contains a simple model training procedure
import tensorflow as tf
import numpy as np

'''Step 1 in main: load the dataset'''
def process_dataset():
    # Load the MNIST dataset
    # Manual download: aria2c -x 16 https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
    # Move mnist.npz to ~/.keras/datasets/
    # tf.keras.datasets.mnist.load_data then reads ~/.keras/datasets/mnist.npz instead of downloading it
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # Reshape
    NUM_TRAIN = 60000
    NUM_TEST = 10000
    x_train = np.reshape(x_train, (NUM_TRAIN, 28, 28, 1))
    x_test = np.reshape(x_test, (NUM_TEST, 28, 28, 1))
    return x_train, y_train, x_test, y_test

'''Step 2 in main: build the model'''
def create_model():
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=[28, 28, 1]))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

'''Step 5 in main: save the model'''
def save(model, filename):
    output_names = model.output.op.name
    sess = tf.keras.backend.get_session()
    # Freeze the graph
    frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [output_names])
    # Remove training-only nodes
    frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
    # Save the model
    with open(filename, "wb") as ofile:
        ofile.write(frozen_graph.SerializeToString())

def main():
    ''' 1 - load the data'''
    x_train, y_train, x_test, y_test = process_dataset()
    ''' 2 - build the model'''
    model = create_model()
    ''' 3 - train the model'''
    model.fit(x_train, y_train, epochs=5, verbose=1)
    ''' 4 - evaluate the model'''
    model.evaluate(x_test, y_test)
    ''' 5 - save the model'''
    save(model, filename="models/lenet5.pb")

if __name__ == '__main__':
    main()
```
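One practical aside: if you are unsure of the input/output node names (which the UFF converter and parser below need), you can list them from the frozen graph. Here is a minimal sketch, assuming TensorFlow 1.x as above and the models/lenet5.pb file written by model.py:

```python
# Sketch: print the node names of the frozen graph to find the input/output names.
# Assumes TensorFlow 1.x and the models/lenet5.pb file produced by model.py above.
import tensorflow as tf

graph_def = tf.GraphDef()
with open("models/lenet5.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

# Each node carries its name and the op type it performs
for node in graph_def.node:
    print(node.name, node.op)
```

For this model the names come out as input_1 and dense_1/Softmax, which is exactly what sample.py registers later.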
After obtaining models/lenet5.pb, run the following command to convert it into a UFF file. The output looks like this:
```
'''The converter prints information about the input/output nodes, which you can use to register them during parsing;
in this example we already know the input/output node names in advance from the tensorflow.keras naming conventions.'''
[root@30d4bceec4c4 end_to_end_tensorflow_mnist]# convert-to-uff models/lenet5.pb
Loading models/lenet5.pb
```
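If you prefer to do the conversion from Python rather than the CLI, the uff package bundled with TensorRT can be used instead. A minimal sketch, assuming the output node name dense_1/Softmax from the model above:

```python
# Sketch: convert the frozen graph to UFF from Python instead of the convert-to-uff CLI.
# Assumes the uff package bundled with TensorRT 5 and the node names of the model above.
import uff

uff.from_tensorflow_frozen_model(
    "models/lenet5.pb",                  # frozen TensorFlow graph produced by model.py
    output_nodes=["dense_1/Softmax"],    # output node name of this model
    output_filename="models/lenet5.uff") # where to write the UFF file
```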
Next, sample.py:

```python
# This example uses the UFF MNIST model to build a TensorRT Inference Engine
from random import randint
from PIL import Image
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit  # This import lets pycuda manage CUDA context creation and cleanup automatically
import tensorrt as trt
import sys, os
# sys.path.insert(1, os.path.join(sys.path[0], ".."))
# import common
# The GiB, find_sample_data, allocate_buffers, do_inference, etc. helpers from common.py have been
# moved into this file so that it is self-contained.

class HostDeviceMem(object):
    '''Simple pairing of a host buffer and its device buffer (also taken from common.py).'''
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

def GiB(val):
    '''Compute the required memory size: shifting left by 10 bits gives KiB, 20 bits MiB, 30 bits GiB'''
    return val * 1 << 30

def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[]):
    '''This function is essentially an argument parser. Parses sample arguments.
    Args:
        description (str): Description of the sample.
        subfolder (str): The subfolder containing data relevant to this sample
        find_files (str): A list of filenames to find. Each filename will be replaced with an absolute path.
    Returns:
        str: Path of data directory.
    Raises:
        FileNotFoundError
    '''
    # For brevity, the data path is hard-coded here.
    data_root = kDEFAULT_DATA_ROOT = os.path.abspath("/TensorRT-5.0.2.6/python/data/")
    subfolder_path = os.path.join(data_root, subfolder)
    if not os.path.exists(subfolder_path):
        print("WARNING: " + subfolder_path + " does not exist. Using " + data_root + " instead.")
    data_path = subfolder_path if os.path.exists(subfolder_path) else data_root
    if not (os.path.exists(data_path)):
        raise FileNotFoundError(data_path + " does not exist.")
    for index, f in enumerate(find_files):
        find_files[index] = os.path.abspath(os.path.join(data_path, f))
        if not os.path.exists(find_files[index]):
            raise FileNotFoundError(find_files[index] + " does not exist. ")
    if find_files:
        return data_path, find_files
    else:
        return data_path
#-----------------

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

class ModelData(object):
    MODEL_FILE = os.path.join(os.path.dirname(__file__), "models/lenet5.uff")
    INPUT_NAME = "input_1"
    INPUT_SHAPE = (1, 28, 28)
    OUTPUT_NAME = "dense_1/Softmax"

'''Step 2 in main: build the engine'''
def build_engine(model_file):
    with trt.Builder(TRT_LOGGER) as builder, \
         builder.create_network() as network, \
         trt.UffParser() as parser:
        builder.max_workspace_size = GiB(1)
        # Parse the UFF network
        parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
        parser.register_output(ModelData.OUTPUT_NAME)
        parser.parse(model_file, network)
        # Build and return an engine
        return builder.build_cuda_engine(network)

'''Step 3 in main'''
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to the device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream

'''Step 4 in main'''
# Load a test sample into pagelocked_buffer
def load_normalized_test_case(data_path, pagelocked_buffer, case_num=randint(0, 9)):
    test_case_path = os.path.join(data_path, str(case_num) + ".pgm")
    # Flatten the image into a 1-D array, normalize it, and copy it into the host-side pagelocked memory.
    img = np.array(Image.open(test_case_path)).ravel()
    np.copyto(pagelocked_buffer, 1.0 - img / 255.0)
    return case_num

'''Step 5 in main: run inference'''
# This function can handle multiple inputs/outputs; inputs and outputs are lists of HostDeviceMem objects
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer the input data to the GPU
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer the results from the GPU back to the host
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return the host-side outputs
    return [out.host for out in outputs]

def main():
    ''' 1 - locate the model file'''
    data_path = find_sample_data(
        description="Runs an MNIST network using a UFF model file",
        subfolder="mnist")
    model_file = ModelData.MODEL_FILE
    ''' 2 - build the engine via build_engine'''
    with build_engine(model_file) as engine:
        ''' 3 - allocate buffers and create a stream'''
        inputs, outputs, bindings, stream = allocate_buffers(engine)
        with engine.create_execution_context() as context:
            ''' 4 - load a test sample and normalize it'''
            case_num = load_normalized_test_case(data_path, pagelocked_buffer=inputs[0].host)
            ''' 5 - run inference; do_inference returns a list, which here contains a single element'''
            [output] = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
            pred = np.argmax(output)
            print("Test Case: " + str(case_num))
            print("Prediction: " + str(pred))

if __name__ == '__main__':
    main()
```
When run, the sample prints the randomly chosen test case index and the predicted digit.
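One practical extension, not part of the original sample: build_engine parses the UFF file and rebuilds the engine on every run, which is relatively slow. The engine can be serialized once and deserialized on later runs. A minimal sketch, assuming the build_engine, ModelData, and TRT_LOGGER defined in sample.py above; the models/lenet5.engine path is just a hypothetical choice:

```python
# Sketch: serialize the built engine so later runs can skip UFF parsing and engine building.
# Assumes build_engine, ModelData, and TRT_LOGGER from sample.py above;
# the models/lenet5.engine path is a hypothetical choice.
import tensorrt as trt

with build_engine(ModelData.MODEL_FILE) as engine:
    with open("models/lenet5.engine", "wb") as f:
        f.write(engine.serialize())

# On a later run, deserialize instead of rebuilding:
with open("models/lenet5.engine", "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())
```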