#coding=utf-8
import tensorflow as tf
from datetime import datetime
import math
import time
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)


def inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1,
                           batch_norm_var_collection='moving_vars'):
    '''Generates the default arguments used throughout the network.
    weight_decay: L2 regularization strength, default 0.00004
    stddev: standard deviation of the weight initializer'''
    # Parameter dictionary for batch normalization
    batch_norm_params = {
        'decay': 0.9997,  # decay coefficient for the moving averages
        'epsilon': 0.001,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }
    # slim.arg_scope assigns default values to function arguments automatically
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        # Nested slim.arg_scope
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params) as sc:
            return sc
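# A minimal usage sketch (assuming a 4-D float tensor named `images`): every
# slim.conv2d built inside the returned scope automatically receives the L2
# regularizer, truncated-normal initializer, ReLU activation and batch-norm
# defaults defined above:
#
#     with slim.arg_scope(inception_v3_arg_scope()):
#         net = slim.conv2d(images, 32, [3, 3], scope='demo')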


def inception_v3_base(inputs, scope=None):
    '''Builds the convolutional part of the Inception V3 network.
    inputs: tensor holding the input image data
    scope: default parameter scope for this function'''

    end_points = {}  # stores important nodes for later use

    with tf.variable_scope(scope, 'InceptionV3', [inputs]):

        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='VALID'):
            '''Five convolutional layers and two max-pooling layers; the input
            size is 299x299x3 and the output size is 35x35x192.
            Arguments: input tensor, 32: number of output channels,
            [3, 3]: kernel size, stride: stride'''
            net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')
            net = slim.conv2d(net, 32, [3, 3], scope='Conv2d_2a_3x3')
            net = slim.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv2d_2b_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_3a_3x3')
            net = slim.conv2d(net, 80, [1, 1], scope='Conv2d_3b_1x1')
            net = slim.conv2d(net, 192, [3, 3], scope='Conv2d_4a_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_5a_3x3')
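            # Shape trace for a 299x299x3 input (VALID padding unless noted):
            # 299x299x3 -> 149x149x32 -> 147x147x32 -> 147x147x64 (SAME)
            # -> 73x73x64 -> 73x73x80 -> 71x71x192 -> 35x35x192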

        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
            '''Three consecutive groups of Inception modules follow.'''

            with tf.variable_scope('Mixed_5b'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 64 output channels'''
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 48 output channels followed by a 5x5
                    convolution with 64 output channels'''
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    '''1x1 convolution with 64 output channels followed by two 3x3
                    convolutions with 96 output channels'''
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 32
                    output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
                '''tf.concat merges the four branches into the final module
                output of 35x35x(64+64+96+32) = 35x35x256'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_5c'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 64 output channels'''
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 48 output channels followed by a 5x5
                    convolution with 64 output channels'''
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0c_5x5')
                with tf.variable_scope('Branch_2'):
                    '''1x1 convolution with 64 output channels followed by two 3x3
                    convolutions with 96 output channels'''
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 64
                    output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                '''tf.concat merges the four branches into the final module
                output of 35x35x(64+64+96+64) = 35x35x288'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_5d'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 64 output channels'''
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 48 output channels followed by a 5x5
                    convolution with 64 output channels'''
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0c_5x5')
                with tf.variable_scope('Branch_2'):
                    '''1x1 convolution with 64 output channels followed by two 3x3
                    convolutions with 96 output channels'''
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with 64
                    output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                '''tf.concat merges the four branches into the final module
                output of 35x35x(64+64+96+64) = 35x35x288'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
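            # Factorization note: in each 35x35 module, Branch_2 stacks two
            # 3x3 convolutions, which cover the same 5x5 receptive field as a
            # single 5x5 convolution while needing 2*3*3 = 18 instead of
            # 5*5 = 25 weights per input/output channel pair, and adds an
            # extra nonlinearity.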

            with tf.variable_scope('Mixed_6a'):

                with tf.variable_scope('Branch_0'):
                    '''3x3 convolution with 384 output channels; with stride 2
                    the output size becomes 17x17x384'''
                    branch_0 = slim.conv2d(net, 384, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_1'):
                    '''1x1 convolution with 64 output channels followed by two 3x3
                    convolutions with 96 output channels; output size 17x17x96'''
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    '''3x3 max pooling with stride 2; pooling does not change the
                    channel count, so the output size is 17x17x288'''
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                               padding='VALID', scope='MaxPool_1a_3x3')

                '''tf.concat merges the three branches into the final module
                output of 17x17x(384+96+288) = 17x17x768'''
                net = tf.concat([branch_0, branch_1, branch_2], 3)
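            # Grid-reduction note: Mixed_6a halves the spatial grid with two
            # parallel stride-2 paths (convolution and max pooling) and
            # concatenates them, so the channel count grows (288 -> 768) as
            # the resolution shrinks, which avoids a representational
            # bottleneck.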

            with tf.variable_scope('Mixed_6b'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 128 output channels
                    Layer 2: 1x7 convolution, 128 output channels
                    Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 128 output channels
                    Layer 2: 7x1 convolution, 128 output channels
                    Layer 3: 1x7 convolution, 128 output channels
                    Layer 4: 7x1 convolution, 128 output channels
                    Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 128, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with
                    192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the final module
                output of 17x17x(192+192+192+192) = 17x17x768'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
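                # Factorization note: the 17x17 modules factorize 7x7
                # convolutions into 1x7 followed by 7x1 (and vice versa);
                # per position this costs 2*7 = 14 instead of 7*7 = 49
                # weights per channel pair for the same receptive field.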

            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                    Layer 2: 1x7 convolution, 160 output channels
                    Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                    Layer 2: 7x1 convolution, 160 output channels
                    Layer 3: 1x7 convolution, 160 output channels
                    Layer 4: 7x1 convolution, 160 output channels
                    Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with
                    192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the final module
                output of 17x17x(192+192+192+192) = 17x17x768'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                    Layer 2: 1x7 convolution, 160 output channels
                    Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                    Layer 2: 7x1 convolution, 160 output channels
                    Layer 3: 1x7 convolution, 160 output channels
                    Layer 4: 7x1 convolution, 160 output channels
                    Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with
                    192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the final module
                output of 17x17x(192+192+192+192) = 17x17x768'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                    Layer 2: 1x7 convolution, 160 output channels
                    Layer 3: 7x1 convolution, 192 output channels'''
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: 1x1 convolution, 160 output channels
                    Layer 2: 7x1 convolution, 160 output channels
                    Layer 3: 1x7 convolution, 160 output channels
                    Layer 4: 7x1 convolution, 160 output channels
                    Layer 5: 1x7 convolution, 192 output channels'''
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with
                    192 output channels'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the final module
                output of 17x17x(192+192+192+192) = 17x17x768'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            '''Mixed_6e is stored in end_points to feed the Auxiliary
            Classifier later'''
            end_points['Mixed_6e'] = net

            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 192 output channels followed by a 3x3
                    convolution with 320 output channels and stride 2; with VALID
                    padding the spatial size shrinks to 8x8'''
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, 320, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: 1x1 convolution, 192 output channels
                    Layer 2: 1x7 convolution, 192 output channels
                    Layer 3: 7x1 convolution, 192 output channels
                    Layer 4: 3x3 convolution, 192 output channels, stride 2
                    Output tensor size: 8x8x192'''
                    branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1, 192, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    '''3x3 max pooling with stride 2 and VALID padding; pooling
                    does not change the channel count, so the output size is
                    8x8x768'''
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                               padding='VALID', scope='MaxPool_1a_3x3')
                '''tf.concat merges the three branches into the final module
                output of 8x8x(320+192+768) = 8x8x1280'''
                net = tf.concat([branch_0, branch_1, branch_2], 3)

            with tf.variable_scope('Mixed_7b'):

                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 320 output channels'''
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: one 1x1 convolution with 384 output channels
                    Layer 2: the concatenation of a 1x3 convolution and a 3x1
                    convolution, each with 384 output channels
                    Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')], 3)

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: one 1x1 convolution with 448 output channels
                    Layer 2: a 3x3 convolution with 384 output channels, which then
                    splits into two sub-branches, a 1x3 convolution and a 3x1
                    convolution (each 384 output channels), merged at the end
                    Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with
                    192 output channels
                    Output tensor size: 8x8x192'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the final module
                output of 8x8x(320+768+768+192) = 8x8x2048'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    '''1x1 convolution with 320 output channels'''
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')

                with tf.variable_scope('Branch_1'):
                    '''Layer 1: one 1x1 convolution with 384 output channels
                    Layer 2: the concatenation of a 1x3 convolution and a 3x1
                    convolution, each with 384 output channels
                    Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')], 3)

                with tf.variable_scope('Branch_2'):
                    '''Layer 1: one 1x1 convolution with 448 output channels
                    Layer 2: a 3x3 convolution with 384 output channels, which then
                    splits into two sub-branches, a 1x3 convolution and a 3x1
                    convolution (each 384 output channels), merged at the end
                    Output tensor size: 8x8x(384+384) = 8x8x768'''
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)

                with tf.variable_scope('Branch_3'):
                    '''3x3 average pooling followed by a 1x1 convolution with
                    192 output channels
                    Output tensor size: 8x8x192'''
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                '''tf.concat merges the four branches into the final module
                output of 8x8x(320+768+768+192) = 8x8x2048'''
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
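                # Design note: on the coarse 8x8 grid, Mixed_7b/7c grow wider
                # rather than deeper: the parallel 1x3 and 3x1 convolutions are
                # concatenated ("expanded filter bank"), which the Inception V3
                # paper argues better preserves high-dimensional representations
                # at the end of the network.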

    return net, end_points
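# A quick shape sanity check (a sketch; `images` is a hypothetical placeholder):
#
#     images = tf.placeholder(tf.float32, [None, 299, 299, 3])
#     net, end_points = inception_v3_base(images)
#     print(net.get_shape())                     # (?, 8, 8, 2048)
#     print(end_points['Mixed_6e'].get_shape())  # (?, 17, 17, 768)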


def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
    '''Adds global average pooling, softmax and the Auxiliary Logits on top of
    the base network.
    num_classes: number of classes to predict
    is_training: flag indicating whether this is the training phase
    dropout_keep_prob: fraction of units kept by Dropout
    prediction_fn: function used to produce the final class predictions
    spatial_squeeze: whether to squeeze out singleton dimensions
        (e.g. 5x3x1 -> 5x3)
    reuse: whether to reuse the network and its Variables
    scope: scope holding the default parameters'''

    # tf.variable_scope sets the network name, reuse flag, etc.
    with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes],
                           reuse=reuse) as scope:
        # slim.arg_scope sets the default is_training flag for
        # Batch Normalization and Dropout
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            # Obtain the final convolutional output net and the end_points dict
            net, end_points = inception_v3_base(inputs, scope=scope)

            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):
                '''Auxiliary Logits: the arg_scope above sets the default stride
                to 1 and the default padding to SAME for convolution, max
                pooling and average pooling; the auxiliary classifier branches
                off the Mixed_6e output stored in end_points.'''

                aux_logits = end_points['Mixed_6e']

                with tf.variable_scope('AuxLogits'):
                    '''Mixed_6e is followed by 5x5 average pooling with stride 3
                    and VALID padding.
                    Output size: 17x17x768 -> 5x5x768 ((17-5+1)/3 rounds up to 5)'''

                    aux_logits = slim.avg_pool2d(
                        aux_logits, [5, 5], stride=3, padding='VALID',
                        scope='AvgPool_1a_5x5')
                    '''1x1 convolution with 128 output channels'''
                    aux_logits = slim.conv2d(
                        aux_logits, 128, [1, 1], scope='Conv2d_1b_1x1')
                    '''5x5 convolution with 768 output channels;
                    the output size becomes 1x1x768'''
                    aux_logits = slim.conv2d(
                        aux_logits, 768, [5, 5],
                        weights_initializer=trunc_normal(0.01),
                        padding='VALID', scope='Conv2d_2a_5x5')
                    '''1x1 convolution with num_classes output channels;
                    the output size becomes 1x1x1000'''
                    aux_logits = slim.conv2d(
                        aux_logits, num_classes, [1, 1], activation_fn=None,
                        normalizer_fn=None, weights_initializer=trunc_normal(0.01),
                        scope='Conv2d_2b_1x1')

                    if spatial_squeeze:
                        '''tf.squeeze turns 1x1x1000 into 1000, which is then
                        stored in end_points'''
                        aux_logits = tf.squeeze(aux_logits, [1, 2],
                                                name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits
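                    # Training note (unused in this forward-only benchmark):
                    # the auxiliary logits are usually folded into the total
                    # loss with a small weight; a sketch, assuming a one-hot
                    # `labels` tensor:
                    #
                    #     total_loss = (
                    #         tf.losses.softmax_cross_entropy(labels, logits) +
                    #         0.4 * tf.losses.softmax_cross_entropy(
                    #             labels, end_points['AuxLogits']))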

                with tf.variable_scope('Logits'):
                    '''The regular classification head: the output of the last
                    convolutional layer, Mixed_7c, goes through 8x8 global
                    average pooling with VALID padding.
                    Output size: 8x8x2048 -> 1x1x2048'''
                    net = slim.avg_pool2d(net, [8, 8], padding='VALID',
                                          scope='AvgPool_1a_8x8')
                    '''A Dropout layer with keep probability dropout_keep_prob'''
                    net = slim.dropout(net, keep_prob=dropout_keep_prob,
                                       scope='Dropout_1b')

                    end_points['PreLogits'] = net
                    '''1x1 convolution with num_classes (1000) output channels'''
                    logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                         normalizer_fn=None, scope='Conv2d_1c_1x1')

                    if spatial_squeeze:
                        '''tf.squeeze removes the two singleton spatial dimensions'''
                        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')

                end_points['Logits'] = logits
                '''softmax produces the final class predictions'''
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    '''Return logits and end_points, which includes the auxiliary nodes'''
    return logits, end_points
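# A minimal usage sketch for inference (assuming a 4-D float tensor `images`):
#
#     with slim.arg_scope(inception_v3_arg_scope()):
#         logits, end_points = inception_v3(images, num_classes=1000,
#                                           is_training=False)
#     probabilities = end_points['Predictions']  # softmax over 1000 classes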

def time_tensorflow_run(session, target, info_string):
    '''Runs `target` num_batches times and reports per-batch timing; the first
    num_steps_burn_in iterations are discarded to skip warm-up overhead.
    Note: num_batches is a module-level global defined below, before this
    function is called.'''
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time

        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration

    # Mean and standard deviation of the per-batch time
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)

    print('%s: %s across %d steps, %.3f +/- %3.3f sec/batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

batch_size = 32
height, width = 299, 299

# Random images stand in for real data; only forward speed is measured here.
inputs = tf.random_uniform((batch_size, height, width, 3))

with slim.arg_scope(inception_v3_arg_scope()):
    logits, end_points = inception_v3(inputs, is_training=False)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches = 100
time_tensorflow_run(sess, logits, "Forward")
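# To also time the backward pass (a sketch reusing the graph and session above;
# the l2 loss is only a stand-in training objective):
#
#     loss = tf.nn.l2_loss(logits)
#     grad = tf.gradients(loss, tf.trainable_variables())
#     time_tensorflow_run(sess, grad, "Forward-backward")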