### 1 Converting Darknet's yolov3.cfg and yolov3.weights to the Keras (TensorFlow) h5 format

Drawing on convert.py from qqwweee/keras-yolo3 and the TensorRT yolov3_onnx sample, I rewrote the code so that a Darknet-format yolov3.cfg plus yolov3.weights can be converted into a Keras (TensorFlow) h5 file:
```python
# -*- coding: utf-8 -*-
'''yolov3_darknet_to_keras.py'''
import os
import io
import argparse
import configparser
import numpy as np
from keras import backend as K
from keras.layers import (Conv2D, Input, ZeroPadding2D, Add, UpSampling2D,
                          MaxPooling2D, Concatenate)
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from keras.utils.vis_utils import plot_model as plot


def parser():
    parser = argparse.ArgumentParser(description="Darknet's yolov3.cfg and yolov3.weights "
                                                 "converted into Keras's yolov3.h5!")
    parser.add_argument('-cfg_path', help='yolov3.cfg')
    parser.add_argument('-weights_path', help='yolov3.weights')
    parser.add_argument('-output_path', help='yolov3.h5')
    parser.add_argument('-weights_only', action='store_true',
                        help='only save weights in yolov3.h5')
    return parser.parse_args()


class WeightLoader(object):

    def __init__(self, weight_path):
        self.fhandle = open(weight_path, 'rb')
        self.read_bytes = 0

    def parser_buffer(self, shape, dtype='int32', buffer_size=None):
        self.read_bytes += buffer_size
        return np.ndarray(shape=shape,
                          dtype=dtype,
                          buffer=self.fhandle.read(buffer_size))

    def head(self):
        major, minor, revision = self.parser_buffer(
            shape=(3,), dtype='int32', buffer_size=12)
        if major * 10 + minor >= 2 and major < 1000 and minor < 1000:
            seen = self.parser_buffer(
                shape=(1,), dtype='int64', buffer_size=8)
        else:
            seen = self.parser_buffer(
                shape=(1,), dtype='int32', buffer_size=4)
        return major, minor, revision, seen

    def close(self):
        self.fhandle.close()


class DarkNetParser(object):

    def __init__(self, cfg_path, weights_path):
        self.block_gen = self._get_block(cfg_path)
        self.weight_loader = WeightLoader(weights_path)
        major, minor, revision, seen = self.weight_loader.head()
        print('weights header: ', major, minor, revision, seen)
        self.input_layer = Input(shape=(None, None, 3))
        self.out_index = []
        self.prev_layer = self.input_layer
        self.all_layers = []
        self.count = [0, 0]

    def _get_block(self, cfg_path):
        block = {}
        with open(cfg_path, 'r', encoding='utf-8') as fr:
            for line in fr:
                line = line.strip()
                if '[' in line and ']' in line:
                    if block:
                        yield block
                    block = {}
                    block['type'] = line.strip(' []')
                elif not line or '#' in line:
                    continue
                else:
                    key, val = line.strip().replace(' ', '').split('=')
                    key, val = key.strip(), val.strip()
                    block[key] = val
            yield block

    def conv(self, block):
        '''When reading darknet's yolov3.weights, the order per conv layer is:
           1 - the biases;
           2 - if batch_normalize is set, then the scale, mean and var;
           3 - the convolution weights.
        '''
        # Darknet serializes convolutional weights as:
        # [bias/beta, [gamma, mean, variance], conv_weights]
        self.count[0] += 1
        # read conv block
        filters = int(block['filters'])
        size = int(block['size'])
        stride = int(block['stride'])
        pad = int(block['pad'])
        activation = block['activation']

        padding = 'same' if pad == 1 and stride == 1 else 'valid'
        batch_normalize = 'batch_normalize' in block

        prev_layer_shape = K.int_shape(self.prev_layer)
        weights_shape = (size, size, prev_layer_shape[-1], filters)
        darknet_w_shape = (filters, weights_shape[2], size, size)
        weights_size = np.product(weights_shape)

        print('+', self.count[0], 'conv2d',
              'bn' if batch_normalize else '  ',
              activation, weights_shape)

        # read `filters` biases
        conv_bias = self.weight_loader.parser_buffer(
            shape=(filters,),
            dtype='float32',
            buffer_size=filters * 4)

        # if batch_normalize is set, read `filters` scale, mean, var next
        if batch_normalize:
            bn_weight_list = self.bn(filters, conv_bias)

        # read the convolution weights
        conv_weights = self.weight_loader.parser_buffer(
            shape=darknet_w_shape,
            dtype='float32',
            buffer_size=weights_size * 4)

        # DarkNet conv_weights are serialized Caffe-style:
        # (out_dim, in_dim, height, width)
        # We would like to set these to Tensorflow order:
        # (height, width, in_dim, out_dim)
        conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
        conv_weights = [conv_weights] if batch_normalize else \
                       [conv_weights, conv_bias]

        act_fn = None
        if activation == 'leaky':
            pass
        elif activation != 'linear':
            raise ValueError('Unknown activation: {}'.format(activation))

        if stride > 1:
            self.prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(self.prev_layer)

        conv_layer = (Conv2D(
            filters, (size, size),
            strides=(stride, stride),
            kernel_regularizer=l2(self.weight_decay),
            use_bias=not batch_normalize,
            weights=conv_weights,
            activation=act_fn,
            padding=padding))(self.prev_layer)

        if batch_normalize:
            conv_layer = BatchNormalization(weights=bn_weight_list)(conv_layer)

        self.prev_layer = conv_layer

        if activation == 'linear':
            self.all_layers.append(self.prev_layer)
        elif activation == 'leaky':
            act_layer = LeakyReLU(alpha=0.1)(self.prev_layer)
            self.prev_layer = act_layer
            self.all_layers.append(act_layer)

    def bn(self, filters, conv_bias):
        '''A BN layer has 4 parameters: bias, scale, mean, var.
           The bias has already been read, so read the remaining
           three here: scale, mean, var.
        '''
        bn_weights = self.weight_loader.parser_buffer(
            shape=(3, filters),
            dtype='float32',
            buffer_size=(filters * 3) * 4)
        # order expected by Keras: scale (gamma), bias (beta), mean, var
        bn_weight_list = [bn_weights[0],
                          conv_bias,
                          bn_weights[1],
                          bn_weights[2]]
        return bn_weight_list

    def maxpool(self, block):
        size = int(block['size'])
        stride = int(block['stride'])
        maxpool_layer = MaxPooling2D(pool_size=(size, size),
                                     strides=(stride, stride),
                                     padding='same')(self.prev_layer)
        self.all_layers.append(maxpool_layer)
        self.prev_layer = maxpool_layer

    def shortcut(self, block):
        index = int(block['from'])
        activation = block['activation']
        assert activation == 'linear', 'Only linear activation supported.'
        shortcut_layer = Add()([self.all_layers[index], self.prev_layer])
        self.all_layers.append(shortcut_layer)
        self.prev_layer = shortcut_layer

    def route(self, block):
        layers_ids = block['layers']
        ids = [int(i) for i in layers_ids.split(',')]
        layers = [self.all_layers[i] for i in ids]
        if len(layers) > 1:
            print('Concatenating route layers:', layers)
            concatenate_layer = Concatenate()(layers)
            self.all_layers.append(concatenate_layer)
            self.prev_layer = concatenate_layer
        else:
            skip_layer = layers[0]
            self.all_layers.append(skip_layer)
            self.prev_layer = skip_layer

    def upsample(self, block):
        stride = int(block['stride'])
        assert stride == 2, 'Only stride=2 supported.'
        upsample_layer = UpSampling2D(stride)(self.prev_layer)
        self.all_layers.append(upsample_layer)
        self.prev_layer = self.all_layers[-1]

    def yolo(self, block):
        self.out_index.append(len(self.all_layers) - 1)
        self.all_layers.append(None)
        self.prev_layer = self.all_layers[-1]

    def net(self, block):
        # weight decay from the [net] section, used as an l2 regularizer
        self.weight_decay = float(block['decay'])

    def create_and_save(self, weights_only, output_path):
        if len(self.out_index) == 0:
            self.out_index.append(len(self.all_layers) - 1)
        output_layers = [self.all_layers[i] for i in self.out_index]
        model = Model(inputs=self.input_layer, outputs=output_layers)
        print(model.summary())

        if weights_only:
            model.save_weights(output_path)
            print('Saved Keras weights to {}'.format(output_path))
        else:
            model.save(output_path)
            print('Saved Keras model to {}'.format(output_path))

    def close(self):
        self.weight_loader.close()


def main():
    args = parser()

    print('loading weights...')
    cfg_parser = DarkNetParser(args.cfg_path, args.weights_path)
    print('creating keras model...')

    layers_fun = {'convolutional': cfg_parser.conv,
                  'net': cfg_parser.net,
                  'yolo': cfg_parser.yolo,
                  'route': cfg_parser.route,
                  'upsample': cfg_parser.upsample,
                  'maxpool': cfg_parser.maxpool,
                  'shortcut': cfg_parser.shortcut}

    print('Parsing Darknet config.')
    for ind, block in enumerate(cfg_parser.block_gen):
        type = block['type']
        layers_fun[type](block)

    cfg_parser.create_and_save(args.weights_only, args.output_path)
    cfg_parser.close()


if __name__ == '__main__':
    main()
```
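As a quick sanity check before converting, the Darknet weights header (major, minor, revision, seen) can be read on its own. The sketch below only mirrors the logic already used in `WeightLoader.head`; the file name is just an example:

```python
import numpy as np

def read_darknet_header(weights_path):
    """Read the Darknet weights header: major, minor, revision, seen."""
    with open(weights_path, 'rb') as f:
        major, minor, revision = np.frombuffer(f.read(12), dtype='int32')
        # Newer Darknet versions (major*10 + minor >= 2) store `seen` as int64.
        if major * 10 + minor >= 2 and major < 1000 and minor < 1000:
            seen = np.frombuffer(f.read(8), dtype='int64')[0]
        else:
            seen = np.frombuffer(f.read(4), dtype='int32')[0]
    return major, minor, revision, seen

print(read_darknet_header('yolov3.weights'))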
Run result:

```
python yolov3_darknet_to_keras.py -cfg_path text.cfg -weights_path yolov3.weights -output_path yolov3c_d2k.h5
```
![](https://img2018.cnblogs.com/blog/441382/201903/441382-20190321153317547-1618024530.png)
![](https://img2018.cnblogs.com/blog/441382/201903/441382-20190321153425958-831228099.png)

### 2 Converting the Keras (TF) h5 format back to Darknet's yolov3.weights

The Darknet parameter names on the left map to the Keras names on the right:

```
bias -> beta
scale -> gamma
mean -> moving_mean
var -> moving_variance
```

The script written on top of this mapping is:

```python
# -*- coding: utf-8 -*-
'''yolov3_keras_to_darknet.py'''
import argparse
import numpy
import numpy as np
import keras
from keras.models import load_model
from keras import backend as K


def parser():
    parser = argparse.ArgumentParser(description="Darknet's yolov3.cfg and yolov3.weights "
                                                 "converted into Keras's yolov3.h5!")
    parser.add_argument('-cfg_path', help='yolov3.cfg')
    parser.add_argument('-h5_path', help='yolov3.h5')
    parser.add_argument('-output_path', help='yolov3.weights')
    return parser.parse_args()


class WeightSaver(object):

    def __init__(self, h5_path, output_path):
        self.model = load_model(h5_path)
        # If you need to read an h5 file produced by Keras's save_weights,
        # first load an h5 saved with model.save, then uncomment the line
        # below to load the save_weights h5 on top of it:
        # self.model.load_weights(h5_path)
        self.layers = {weight.name: weight for weight in self.model.weights}
        self.sess = K.get_session()
        self.fhandle = open(output_path, 'wb')
        self._write_head()

    def _write_head(self):
        numpy_data = numpy.ndarray(shape=(3,),
                                   dtype='int32',
                                   buffer=np.array([0, 2, 0], dtype='int32'))
        self.save(numpy_data)
        numpy_data = numpy.ndarray(shape=(1,),
                                   dtype='int64',
                                   buffer=np.array([320000], dtype='int64'))
        self.save(numpy_data)

    def get_bn_layername(self, num):
        layer_name = 'batch_normalization_{num}'.format(num=num)
        bias = self.layers['{0}/beta:0'.format(layer_name)]
        scale = self.layers['{0}/gamma:0'.format(layer_name)]
        mean = self.layers['{0}/moving_mean:0'.format(layer_name)]
        var = self.layers['{0}/moving_variance:0'.format(layer_name)]
        bias_np = self.get_numpy(bias)
        scale_np = self.get_numpy(scale)
        mean_np = self.get_numpy(mean)
        var_np = self.get_numpy(var)
        return bias_np, scale_np, mean_np, var_np

    def get_convbias_layername(self, num):
        layer_name = 'conv2d_{num}'.format(num=num)
        bias = self.layers['{0}/bias:0'.format(layer_name)]
        bias_np = self.get_numpy(bias)
        return bias_np

    def get_conv_layername(self, num):
        layer_name = 'conv2d_{num}'.format(num=num)
        conv = self.layers['{0}/kernel:0'.format(layer_name)]
        conv_np = self.get_numpy(conv)
        return conv_np

    def get_numpy(self, layer_name):
        numpy_data = self.sess.run(layer_name)
        return numpy_data

    def save(self, numpy_data):
        bytes_data = numpy_data.tobytes()
        self.fhandle.write(bytes_data)
        self.fhandle.flush()

    def close(self):
        self.fhandle.close()


class KerasParser(object):

    def __init__(self, cfg_path, h5_path, output_path):
        self.block_gen = self._get_block(cfg_path)
        self.weights_saver = WeightSaver(h5_path, output_path)
        self.count_conv = 0
        self.count_bn = 0

    def _get_block(self, cfg_path):
        block = {}
        with open(cfg_path, 'r', encoding='utf-8') as fr:
            for line in fr:
                line = line.strip()
                if '[' in line and ']' in line:
                    if block:
                        yield block
                    block = {}
                    block['type'] = line.strip(' []')
                elif not line or '#' in line:
                    continue
                else:
                    key, val = line.strip().replace(' ', '').split('=')
                    key, val = key.strip(), val.strip()
                    block[key] = val
            yield block

    def close(self):
        self.weights_saver.close()

    def conv(self, block):
        self.count_conv += 1
        batch_normalize = 'batch_normalize' in block
        print('handling.. ', self.count_conv)

        # If the layer has BN, write the BN parameters first,
        # in the Darknet order: bias, scale, mean, var
        if batch_normalize:
            bias, scale, mean, var = self.bn()
            self.weights_saver.save(bias)
            scale = scale.reshape(1, -1)
            mean = mean.reshape(1, -1)
            var = var.reshape(1, -1)
            remain = np.concatenate([scale, mean, var], axis=0)
            self.weights_saver.save(remain)
        # otherwise, write the conv bias first
        else:
            conv_bias = self.weights_saver.get_convbias_layername(self.count_conv)
            self.weights_saver.save(conv_bias)

        # Then write the conv weights; they must be transposed from
        # (height, width, in_dim, out_dim) to (out_dim, in_dim, height, width)
        conv_weights = self.weights_saver.get_conv_layername(self.count_conv)
        conv_weights = np.transpose(conv_weights, [3, 2, 0, 1])
        self.weights_saver.save(conv_weights)

    def bn(self):
        self.count_bn += 1
        bias, scale, mean, var = self.weights_saver.get_bn_layername(self.count_bn)
        return bias, scale, mean, var


def main():
    args = parser()
    keras_loader = KerasParser(args.cfg_path, args.h5_path, args.output_path)

    for block in keras_loader.block_gen:
        if 'convolutional' in block['type']:
            keras_loader.conv(block)
    keras_loader.close()


if __name__ == "__main__":
    main()
```
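For reference, the weight-variable listing shown below can be reproduced by loading the converted model and iterating over `model.weights`. This is a minimal sketch; the h5 file name is assumed to be the one produced by the first script:

```python
from keras.models import load_model

model = load_model('yolov3c_d2k.h5')
# Each entry is a tf.Variable such as 'conv2d_1/kernel:0' or
# 'batch_normalization_1/gamma:0'; WeightSaver above looks the
# variables up by exactly these names.
for weight in model.weights:
    print(weight.name, weight.shape)
```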
Loading the h5 file saved by Keras and listing its weight variables gives the following:

```
[<tf.Variable 'conv2d_1/kernel:0' shape=(3, 3, 3, 32) dtype=float32_ref>, <tf.Variable 'batch_normalization_1/gamma:0' shape=(32,) dtype=float32_ref>, <tf.Variable 'batch_normalization_1/beta:0' shape=(32,) dtype=float32_ref>, <tf.Variable 'batch_normalization_1/moving_mean:0' shape=(32,) dtype=float32_ref>, <tf.Variable 'batch_normalization_1/moving_variance:0' shape=(32,) dtype=float32_ref>,
<tf.Variable 'conv2d_2/kernel:0' shape=(3, 3, 32, 64) dtype=float32_ref>, <tf.Variable 'batch_normalization_2/gamma:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_2/beta:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_2/moving_mean:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_2/moving_variance:0' shape=(64,) dtype=float32_ref>,
<tf.Variable 'conv2d_3/kernel:0' shape=(1, 1, 64, 32) dtype=float32_ref>, <tf.Variable 'batch_normalization_3/gamma:0' shape=(32,) dtype=float32_ref>, <tf.Variable 'batch_normalization_3/beta:0' shape=(32,) dtype=float32_ref>, <tf.Variable 'batch_normalization_3/moving_mean:0' shape=(32,) dtype=float32_ref>, <tf.Variable 'batch_normalization_3/moving_variance:0' shape=(32,) dtype=float32_ref>,
<tf.Variable 'conv2d_4/kernel:0' shape=(3, 3, 32, 64) dtype=float32_ref>, <tf.Variable 'batch_normalization_4/gamma:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_4/beta:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_4/moving_mean:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_4/moving_variance:0' shape=(64,) dtype=float32_ref>,
<tf.Variable 'conv2d_5/kernel:0' shape=(3, 3, 64, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_5/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_5/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_5/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_5/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_6/kernel:0' shape=(1, 1, 128, 64) dtype=float32_ref>, <tf.Variable 'batch_normalization_6/gamma:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_6/beta:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_6/moving_mean:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_6/moving_variance:0' shape=(64,) dtype=float32_ref>,
<tf.Variable 'conv2d_7/kernel:0' shape=(3, 3, 64, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_7/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_7/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_7/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_7/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_8/kernel:0' shape=(1, 1, 128, 64) dtype=float32_ref>, <tf.Variable 'batch_normalization_8/gamma:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_8/beta:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_8/moving_mean:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'batch_normalization_8/moving_variance:0' shape=(64,) dtype=float32_ref>,
<tf.Variable 'conv2d_9/kernel:0' shape=(3, 3, 64, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_9/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_9/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_9/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_9/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_10/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_10/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_10/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_10/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_10/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_11/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_11/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_11/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_11/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_11/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_12/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_12/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_12/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_12/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_12/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_13/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_13/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_13/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_13/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_13/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_14/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_14/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_14/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_14/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_14/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_15/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_15/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_15/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_15/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_15/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_16/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_16/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_16/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_16/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_16/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_17/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_17/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_17/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_17/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_17/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_18/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_18/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_18/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_18/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_18/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_19/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_19/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_19/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_19/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_19/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_20/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_20/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_20/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_20/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_20/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_21/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_21/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_21/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_21/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_21/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_22/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_22/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_22/beta:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_22/moving_mean:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_22/moving_variance:0' shape=(256,) dtype=float32_ref>,
<tf.Variable 'conv2d_23/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_23/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_23/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_23/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_23/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_24/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_24/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_24/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_25/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_25/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_25/beta:0' shape=(128,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_26/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_26/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_26/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_27/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_27/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_27/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_28/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_28/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_28/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_29/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_29/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_29/beta:0' shape=(512,) dtype=float32_ref> ......
<tf.Variable 'conv2d_30/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_30/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_30/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_31/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_31/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_31/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_32/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_32/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_32/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_33/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_33/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_33/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_34/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_34/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_34/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_35/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_35/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_35/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_36/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_36/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_36/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_37/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_37/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_37/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_38/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_38/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_38/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_39/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_39/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_39/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_40/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_40/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_40/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_41/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_41/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_41/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_42/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_42/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_42/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_43/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_43/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_43/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_44/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_44/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_44/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_45/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_45/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_45/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_46/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_46/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_46/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_47/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_47/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_47/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_48/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_48/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_48/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_49/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_49/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_49/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_50/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_50/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_50/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_51/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_51/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_51/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_52/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_52/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_52/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_53/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_53/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_53/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_54/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_54/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_54/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_55/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_55/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_55/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_56/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_56/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_56/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_57/kernel:0' shape=(1, 1, 1024, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_57/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_57/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_58/kernel:0' shape=(3, 3, 512, 1024) dtype=float32_ref>, <tf.Variable 'batch_normalization_58/gamma:0' shape=(1024,) dtype=float32_ref>, <tf.Variable 'batch_normalization_58/beta:0' shape=(1024,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_59/kernel:0' shape=(1, 1, 1024, 21) dtype=float32_ref>, <tf.Variable 'conv2d_59/bias:0' shape=(21,) dtype=float32_ref>,
<tf.Variable 'conv2d_60/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_59/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_59/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_61/kernel:0' shape=(1, 1, 768, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_60/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_60/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_62/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_61/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_61/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_63/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_62/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_62/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_64/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_63/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_63/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_65/kernel:0' shape=(1, 1, 512, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_64/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_64/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_66/kernel:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'batch_normalization_65/gamma:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'batch_normalization_65/beta:0' shape=(512,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_67/kernel:0' shape=(1, 1, 512, 21) dtype=float32_ref>, <tf.Variable 'conv2d_67/bias:0' shape=(21,) dtype=float32_ref>,
<tf.Variable 'conv2d_68/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_66/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_66/beta:0' shape=(128,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_69/kernel:0' shape=(1, 1, 384, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_67/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_67/beta:0' shape=(128,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_70/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_68/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_68/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_71/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_69/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_69/beta:0' shape=(128,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_72/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_70/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_70/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_73/kernel:0' shape=(1, 1, 256, 128) dtype=float32_ref>, <tf.Variable 'batch_normalization_71/gamma:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_71/beta:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_71/moving_mean:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'batch_normalization_71/moving_variance:0' shape=(128,) dtype=float32_ref>,
<tf.Variable 'conv2d_74/kernel:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'batch_normalization_72/gamma:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'batch_normalization_72/beta:0' shape=(256,) dtype=float32_ref>, ......
<tf.Variable 'conv2d_75/kernel:0' shape=(1, 1, 256, 21) dtype=float32_ref>, <tf.Variable 'conv2d_75/bias:0' shape=(21,) dtype=float32_ref>,
```
Run result:

```
python yolov3_keras_to_darknet.py -cfg_path text.cfg -h5_path yolov3c_d2k.h5 -output_path yolov3c_d2k_k2d.weights
```
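To check the round trip, the original and regenerated weight files can be compared by checksum. A minimal sketch using Python's hashlib, with the file names from the commands above:

```python
import hashlib

def md5sum(path):
    """Return the hex md5 digest of a file, read in chunks."""
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest()

print(md5sum('yolov3.weights'))
print(md5sum('yolov3c_d2k_k2d.weights'))
```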
![](https://img2018.cnblogs.com/blog/441382/201903/441382-20190321152939710-1147459957.png)

As you can see, the original yolov3.weights was converted to yolov3c_d2k.h5 and then back to yolov3c_d2k_k2d.weights, and the md5 checksum did not change, which shows the reverse conversion works.

### 3 A real-world example

Here is the complete procedure.

First, the machine environment (which, I think, should not really matter here):

1 - python 3.5.6;
2 - keras 2.2.4;
3 - tensorflow-gpu 1.12.0.

Then, download the following from the [Baidu Netdisk](https://pan.baidu.com/s/1gTW9gwJR6hlwTuyB6nCkzQ#list/path=%2Fchineseocr) link provided by the [chineseocr](https://github.com/chineseocr/chineseocr) project on GitHub:

> * text.h5: saved via Keras's save_weights
> * text.weights: the file produced by darknet
> * text.cfg: the yolov3 network structure used in darknet

If you directly run
```
python yolov3_keras_to_darknet.py -cfg_path text.cfg -h5_path text.h5 -output_path test.weights
```
it reports an error:

![](https://img2018.cnblogs.com/blog/441382/201904/441382-20190426130013495-2030975770.png)

This is because this h5 file was saved with save_weights rather than save. So we first run
```
python yolov3_darknet_to_keras.py -cfg_path text.cfg -weights_path text.weights -output_path test.h5
```
which converts the darknet network and weights into an h5 file saved with Keras's save. After that, the following runs without any problem:
```
python h5_to_weights.py -cfg_path text.cfg -h5_path test.h5 -output_path test.weights
```
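As an aside, the commented hint in WeightSaver.__init__ points at a way to feed text.h5 (the save_weights file) to the converter without regenerating it: first load a full model saved with save, then overwrite its weights. A rough, unverified sketch:

```python
from keras.models import load_model

# Sketch only (not verified): reuse the architecture from the full-model h5
# produced above, then swap in the weights-only h5 from chineseocr before
# handing the model to WeightSaver.
model = load_model('test.h5')      # full model saved with model.save
model.load_weights('text.h5')      # weights-only h5 saved with save_weights
```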