TensorFlow in Action 4: CIFAR-10 Recognition with TensorFlow

import cifar10, cifar10_input
import tensorflow as tf
import numpy as np
import time
import math

max_steps = 3000
batch_size = 128
data_dir = '/tmp/cifar10_data/cifar-10-batches-bin'


def variable_with_weight_loss(shape, stddev, wl):
    '''Create and initialise a weight variable with tf.truncated_normal (a truncated
    normal distribution) and optionally attach an L2 loss, which amounts to applying
    L2 regularisation to that layer.'''
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    # wl: scales the L2 loss; tf.nn.l2_loss computes the L2 loss of the weights
    if wl is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
        # tf.add_to_collection: gather all weight losses in one collection named 'losses'
        tf.add_to_collection('losses', weight_loss)

    return var

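# Note (illustration, not in the original script): tf.nn.l2_loss(w) computes
# sum(w ** 2) / 2, so a weight vector [3.0, 4.0] yields (9 + 16) / 2 = 12.5.
# Every layer created with wl > 0 therefore contributes wl * sum(w ** 2) / 2
# to the 'losses' collection, which the loss() function below sums together
# with the cross-entropy term via tf.add_n.
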
# Use the cifar10 module to download the dataset and extract it to the default location
cifar10.maybe_download_and_extract()

'''distorted_inputs produces the data used for training, i.e. images and their labels,
returned as ready-made tensors; each run yields a batch of batch_size samples'''
images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                            batch_size=batch_size)

images_test, labels_test = cifar10_input.inputs(eval_data=True,
                                                data_dir=data_dir,
                                                batch_size=batch_size)

image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])
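# Note: the placeholders are 24x24x3 rather than the raw 32x32x3 CIFAR-10 size
# because the cifar10_input pipelines crop (and, for training, randomly augment)
# the images down to 24x24 before batching.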

'''First convolutional layer: variable_with_weight_loss creates and initialises
the convolution kernels. Kernel size 5x5, 3 colour channels, 64 kernels.
weight1 is initialised with stddev 0.05; wl (weight loss) is 0, so no regularisation.'''
weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, wl=0.0)
# tf.nn.conv2d convolves the input image_holder with weight1
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')

bias1 = tf.Variable(tf.constant(0.0, shape=[64]))

conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
# Max pooling with a 3x3 window and a 2x2 stride
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# The LRN layer mimics the 'lateral inhibition' mechanism of biological neural systems
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

'''Second convolutional layer:'''
weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, wl=0.0)
kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
# bias2 is initialised to 0.1
bias2 = tf.Variable(tf.constant(0.1, shape=[64]))

conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
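# Shape check (illustration): with 24x24 inputs, both convolutions use SAME padding
# and stride 1, and each 3x3 / stride-2 SAME max-pool halves the spatial size
# (rounding up): 24x24x64 -> 12x12x64 after pool1 -> 6x6x64 after pool2, so the
# flattened dimension computed below is 6 * 6 * 64 = 2304.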

# Fully connected layer
reshape = tf.reshape(pool2, [batch_size, -1])
dim = reshape.get_shape()[1].value
weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, wl=0.004)
bias3 = tf.Variable(tf.constant(0.1, shape=[384]))
local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)

# Fully connected layer with half as many hidden units
weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, wl=0.004)
bias4 = tf.Variable(tf.constant(0.1, shape=[192]))
local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)

'''The stddev of the truncated normal is set to the reciprocal of the previous
hidden layer's unit count, and this layer is not included in the L2 regularisation'''
weight5 = variable_with_weight_loss(shape=[192, 10], stddev=1 / 192.0, wl=0.0)
bias5 = tf.Variable(tf.constant(0.0, shape=[10]))
logits = tf.add(tf.matmul(local4, weight5), bias5)
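# The fully connected head therefore maps 2304 -> 384 -> 192 -> 10. Note that
# logits is left unnormalised (no softmax here): the softmax is applied inside
# tf.nn.sparse_softmax_cross_entropy_with_logits in loss() below.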


def loss(logits, labels):
    '''Compute the CNN's loss.
    tf.nn.sparse_softmax_cross_entropy_with_logits combines the softmax
    computation and the cross-entropy loss computation in a single op.'''
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    # tf.reduce_mean averages the cross entropy over the batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                        name='cross_entropy')
    # tf.add_to_collection: add the cross-entropy loss to the overall 'losses' collection
    tf.add_to_collection('losses', cross_entropy_mean)
    # tf.add_n sums all the losses in the 'losses' collection to get the final loss
    return tf.add_n(tf.get_collection('losses'), name='total_loss')


# Pass the logits node and label_holder into loss() to obtain the final loss
loss = loss(logits, label_holder)
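# Here the final loss is the mean cross-entropy plus the weight-decay terms
# collected earlier (0.004 * l2_loss for weight3 and weight4; the other layers
# were created with wl=0.0). This also rebinds the name loss from the function
# to the resulting loss tensor.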

train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy of the top-k predictions; k=1 here (how often the single
# highest-scoring class is the correct one)
top_k_op = tf.nn.in_top_k(logits, label_holder, 1)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
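# start_queue_runners launches the background threads that fill the input queues
# built by cifar10_input; without it, the sess.run calls on images_train /
# labels_train below would block indefinitely.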

for step in range(max_steps):
    # training
    start_time = time.time()
    # Fetch one batch of training data
    image_batch, label_batch = sess.run([images_train, labels_train])
    # Feed the batch into the train_op and loss computations
    _, loss_value = sess.run([train_op, loss],
                             feed_dict={image_holder: image_batch, label_holder: label_batch})

    duration = time.time() - start_time
    if step % 10 == 0:
        # Number of examples trained per second
        examples_per_sec = batch_size / duration
        # Time taken for one batch of data
        sec_per_batch = float(duration)

        format_str = ('step %d, loss=%.2f (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))
# Number of test samples
num_examples = 10000
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
    # Fetch a batch of images_test / labels_test
    image_batch, label_batch = sess.run([images_test, labels_test])
    # Count how many samples in this batch are predicted correctly at top 1
    predictions = sess.run([top_k_op], feed_dict={image_holder: image_batch,
                                                  label_holder: label_batch
                                                  })
    # Total number of correctly predicted samples over all test batches
    true_count += np.sum(predictions)
    step += 1
# Accuracy
precision = true_count / total_sample_count
print('precision @ 1 = %.3f' % precision)
Output of the last few training steps and the final test accuracy:

step 2970, loss = 0.95 (877.4 examples/sec; 0.146 sec/batch)
step 2980, loss = 1.12 (862.6 examples/sec; 0.148 sec/batch)
step 2990, loss = 1.06 (967.1 examples/sec; 0.132 sec/batch)
precision @ 1 = 0.705
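For intuition about the evaluation step, here is a minimal NumPy sketch (my illustration, not part of the original script) of what tf.nn.in_top_k(logits, label_holder, 1) measures for a batch: one boolean per example indicating whether the highest-scoring class equals the label, which the evaluation loop then sums and divides by the total sample count.

import numpy as np

# Toy batch: 3 examples, 4 classes (values made up purely for illustration).
logits = np.array([[0.1, 2.0, 0.3, 0.0],   # arg-max class: 1
                   [1.5, 0.2, 0.1, 0.4],   # arg-max class: 0
                   [0.0, 0.1, 0.2, 3.0]])  # arg-max class: 3
labels = np.array([1, 2, 3])

# Rough equivalent of in_top_k with k=1 (ignoring ties): does the arg-max
# class match the label?
top1_correct = np.argmax(logits, axis=1) == labels   # [True, False, True]

true_count = np.sum(top1_correct)                    # 2 correct predictions
precision = true_count / len(labels)                 # 2 / 3 ≈ 0.667
print('precision @ 1 = %.3f' % precision)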