# coding: utf-8

from datetime import datetime
import math
import time
import tensorflow as tf

batch_size = 32
num_batches = 1000

def print_activations(t):
    '''Print the shape of a conv or pooling layer's output tensor.
    t: tensor; t.op.name: the tensor's name;
    t.get_shape().as_list(): the tensor's shape as a list'''
    print(t.op.name, '', t.get_shape().as_list())

def inference(images):
    '''input: images; returns: the last layer, pool5, and parameters'''
    parameters = []

    with tf.name_scope('conv1') as scope:
        # First conv layer: 11x11 kernels, 3 colour channels, 64 kernels
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64],
                             dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve the input images with a 4x4 stride
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        # Biases are all initialized to 0
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                             trainable=True, name='biases')
        # Add the biases to the convolution result
        bias = tf.nn.bias_add(conv, biases)
        # ReLU non-linearity on the result
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        # Add this layer's kernel and biases to parameters
        parameters += [kernel, biases]

    # LRN layer; depth_radius=4 and the other values follow the AlexNet paper
    lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001/9, beta=0.75, name='lrn1')
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool1')
    print_activations(pool1)

    with tf.name_scope('conv2') as scope:
        # Second conv layer: 5x5 kernels, 64 input channels, 192 kernels
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192],
                             dtype=tf.float32, stddev=1e-1), name='weights')
        # Convolve pool1 with a 1x1 stride
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv2)

    # LRN and max pooling, as after conv1
    lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool2')
    print_activations(pool2)

    with tf.name_scope('conv3') as scope:
        # Third conv layer: 3x3 kernels, 192 input channels, 384 kernels
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                             dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv3)

    with tf.name_scope('conv4') as scope:
        # Fourth conv layer: 3x3 kernels, 384 input channels, 256 kernels
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
                             dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv4)

    with tf.name_scope('conv5') as scope:
        # Fifth conv layer: 3x3 kernels, 256 input channels, 256 kernels
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                             dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv5)

    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='VALID', name='pool5')
    print_activations(pool5)
    return pool5, parameters

def time_tensorflow_run(session, target, info_string):
    '''Measure the per-iteration runtime of AlexNet.
    target: the op to benchmark
    info_string: name of the benchmark'''
    num_steps_burn_in = 10  # warm-up iterations before timing starts
    total_duration = 0.0  # total elapsed time
    total_duration_squared = 0.0  # accumulated to compute the variance

    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        # After the num_steps_burn_in warm-up iterations, report the
        # current iteration's duration every 10 rounds
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches  # mean time per iteration
    vr = total_duration_squared / num_batches - mn * mn
    # standard deviation of the per-iteration time
    sd = math.sqrt(vr)

    print('%s: %s across %d steps, %.3f +/-%.3f sec/batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

def run_benchmark():

    with tf.Graph().as_default():
        image_size = 224
        # batch_size: samples per iteration
        # image_size: image height and width
        # 3: number of colour channels
        images = tf.Variable(tf.random_normal([batch_size,
                                               image_size,
                                               image_size, 3],
                                              dtype=tf.float32,
                                              stddev=1e-1))
        pool5, parameters = inference(images)

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        # Time the forward pass only
        time_tensorflow_run(sess, pool5, "Forward")

        # Time forward + backward: the L2 loss of pool5 and its
        # gradients with respect to all parameters
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "Forward-backward")

run_benchmark()
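The script above uses the TF 1.x graph API. For comparison only, here is a minimal sketch of the same convolution/pooling stack in the tf.keras API (TensorFlow 2.x); this rewrite is my own assumption of a modern equivalent, not part of the original benchmark, and the helper name lrn is introduced here purely for illustration.

import tensorflow as tf
from tensorflow.keras import layers

def lrn(x):
    # Same LRN hyperparameters as the graph-mode version above
    return tf.nn.local_response_normalization(
        x, depth_radius=4, bias=1.0, alpha=0.001/9, beta=0.75)

model = tf.keras.Sequential([
    layers.Input(shape=(224, 224, 3)),
    layers.Conv2D(64, 11, strides=4, padding='same', activation='relu'),
    layers.Lambda(lrn),
    layers.MaxPooling2D(pool_size=3, strides=2, padding='valid'),
    layers.Conv2D(192, 5, strides=1, padding='same', activation='relu'),
    layers.Lambda(lrn),
    layers.MaxPooling2D(pool_size=3, strides=2, padding='valid'),
    layers.Conv2D(384, 3, strides=1, padding='same', activation='relu'),
    layers.Conv2D(256, 3, strides=1, padding='same', activation='relu'),
    layers.Conv2D(256, 3, strides=1, padding='same', activation='relu'),
    layers.MaxPooling2D(pool_size=3, strides=2, padding='valid'),
])
model.summary()  # spatial sizes should match the print_activations output below

Running the original script prints the shape of every conv/pool output and then the timing summaries: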
conv1 [32, 56, 56, 64]
pool1 [32, 27, 27, 64]
conv2 [32, 27, 27, 192]
pool2 [32, 13, 13, 192]
conv3 [32, 13, 13, 384]
conv4 [32, 13, 13, 256]
conv5 [32, 13, 13, 256]
pool5 [32, 6, 6, 256]
2017-12-20 23:31:19.926000: step 990, duration = 0.197
2017-12-20 23:31:19.926000: Forward-backward across 1000 steps, 0.196 +/-0.019 sec/batch
2017-12-20 23:31:20.122000: Forward-backward across 1000 steps, 0.196 +/-0.018 sec/batch
2017-12-20 23:31:20.319000: Forward-backward across 1000 steps, 0.197 +/-0.017 sec/batch
2017-12-20 23:31:20.515000: Forward-backward across 1000 steps, 0.197 +/-0.016 sec/batch
2017-12-20 23:31:20.711000: Forward-backward across 1000 steps, 0.197 +/-0.014 sec/batch
2017-12-20 23:31:20.907000: Forward-backward across 1000 steps, 0.197 +/-0.013 sec/batch
2017-12-20 23:31:21.104000: Forward-backward across 1000 steps, 0.197 +/-0.011 sec/batch
2017-12-20 23:31:21.299000: Forward-backward across 1000 steps, 0.197 +/-0.010 sec/batch
2017-12-20 23:31:21.494000: Forward-backward across 1000 steps, 0.198 +/-0.007 sec/batch
2017-12-20 23:31:21.690000: Forward-backward across 1000 steps, 0.198 +/-0.004 sec/batch
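The printed shapes can be verified by hand: a 'SAME' convolution gives a spatial size of ceil(input / stride), and the 'VALID' 3x3, stride-2 max pooling gives floor((input - 3) / 2) + 1. A quick sanity check in plain Python (the helper names conv_same and pool_valid are hypothetical, introduced only for this check):

import math

def conv_same(size, stride):
    # 'SAME' padding: output size = ceil(input / stride)
    return math.ceil(size / stride)

def pool_valid(size, ksize=3, stride=2):
    # 'VALID' padding: output size = floor((input - ksize) / stride) + 1
    return (size - ksize) // stride + 1

s = conv_same(224, 4)  # conv1: 224 -> 56
s = pool_valid(s)      # pool1: 56 -> 27 (conv2 is stride 1, SAME, so 27 stays)
s = pool_valid(s)      # pool2: 27 -> 13 (conv3-conv5 keep 13)
s = pool_valid(s)      # pool5: 13 -> 6
print(s)               # 6, matching pool5 [32, 6, 6, 256]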