import tensorflow as tf

a = tf.constant([1.0, 2.0], name='a', dtype=tf.float32)  # define a constant vector
b = tf.constant([2.0, 3.0], name='b')
result = a + b  # element-wise vector addition
print(result)

# Create a session and compute the result through it
# sess = tf.Session()
sess = tf.InteractiveSession()  # automatically registers itself as the default session
print(sess.run(result))
print(result.eval(session=sess))
sess.close()  # close the session and release its resources
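Tensor.eval() needs a default session to be available. tf.InteractiveSession registers one automatically, while a plain tf.Session can be registered explicitly with as_default. A minimal sketch of the same computation done that way (this variant is my own addition, not from the original notes):

import tensorflow as tf

a = tf.constant([1.0, 2.0], name='a')
b = tf.constant([2.0, 3.0], name='b')
result = a + b

sess = tf.Session()
with sess.as_default():   # register sess as the default session
    print(result.eval())  # eval() now finds the default session on its own
sess.close()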
g1 = tf.Graph()  # create a new computation graph
with g1.as_default():
    # define variable 'v' in graph g1 with an initial value of all zeros
    v = tf.get_variable("v", initializer=tf.zeros(shape=[1]))

# read the value of variable 'v' in graph g1
with tf.Session(graph=g1) as sess:  # use the session through a context manager
    tf.global_variables_initializer().run()
    with tf.variable_scope('', reuse=True):
        print(sess.run(tf.get_variable('v')))
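Each graph keeps its own tensors and variables, so a second graph can hold a variable with the same name but a different value. A small sketch to make the isolation visible (g2 is my own addition):

g2 = tf.Graph()
with g2.as_default():
    # a variable also named 'v', but initialized to all ones in this graph
    v = tf.get_variable("v", initializer=tf.ones(shape=[1]))

with tf.Session(graph=g2) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope('', reuse=True):
        print(sess.run(tf.get_variable('v')))  # prints [1.], while g1 prints [0.]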
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
sess1 = tf.InteractiveSession(config=config)
sess2 = tf.Session(config=config)
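allow_soft_placement=True lets an op fall back to another device (typically the CPU) when the requested device cannot run it, and log_device_placement=True logs the device each op ends up on. A minimal sketch of explicit placement, assuming the machine may or may not have a GPU:

with tf.device('/gpu:0'):
    # with allow_soft_placement, this op silently moves to the CPU if no GPU exists
    c = tf.constant([1.0, 2.0], name='c')
with tf.Session(config=config) as sess:
    print(sess.run(c))  # the placement decisions are printed to the log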
The main steps for solving a classification problem with a neural network (the full example at the end of this section walks through them):

1. Extract feature vectors from the entities in the problem to serve as the network's input.
2. Define the network structure and how the output is computed from the input (the forward-propagation algorithm).
3. Adjust the values of the network's parameters on training data (the training process).
4. Use the trained network to make predictions on unseen data.
Fully connected neural network: every node in one layer is connected to every node in the adjacent layer.
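Concretely, one fully connected layer is a matrix multiplication plus a bias, usually followed by a nonlinearity. A minimal sketch (the layer sizes and the ReLU activation are my own choices):

x = tf.constant([[0.7, 0.9]])                        # one sample with two features
W = tf.Variable(tf.random_normal([2, 3], stddev=1))  # 2 inputs fully connected to 3 outputs
b = tf.Variable(tf.zeros([3]))
h = tf.nn.relu(tf.matmul(x, W) + b)  # every input node feeds every output node through W
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(h))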
Random-number generation functions supported by TensorFlow:

- tf.random_normal: normal distribution.
- tf.truncated_normal: normal distribution, but any value more than 2 standard deviations from the mean is re-drawn.
- tf.random_uniform: uniform distribution.
- tf.random_gamma: Gamma distribution.

Constant generation functions in TensorFlow:

- tf.zeros([2,3], int32): produces an all-zeros array.
- tf.ones([2,3], int32): produces an all-ones array.
- tf.fill([2,3], 9): produces an array filled entirely with the given number.
- tf.constant([1,2,3]): produces a constant with the given values.

# Declare a 2*3 matrix variable whose initial values are random numbers
# with mean 0 and standard deviation 2
weights = tf.Variable(tf.random_normal([2, 3], mean=0, stddev=2))
biases = tf.Variable(tf.zeros([3]))
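These functions only define ops in the graph; concrete values appear when the ops are run in a session. A quick sketch evaluating a few of them (the shapes here are arbitrary):

with tf.Session() as sess:
    print(sess.run(tf.truncated_normal([2, 2], stddev=1.0)))        # nothing beyond 2 stddev
    print(sess.run(tf.random_uniform([2, 2], minval=0, maxval=1)))  # uniform in [0, 1)
    print(sess.run(tf.fill([2, 3], 9)))                             # 2*3 array of 9s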
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))  # the op's output is a tensor
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
x = tf.constant([[0.7, 0.9]])  # 1*2 matrix
a = tf.matmul(x, w1)  # matrix multiplication
y = tf.matmul(a, w2)

with tf.Session() as sess:
    # sess.run(w1.initializer)  # initialize the variables one by one
    # sess.run(w2.initializer)
    # initialize all variables at once
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print(sess.run(y))
    print(tf.all_variables())  # note the call: without (), this would print the function itself
- tf.all_variables: returns all variables in the current computation graph.
- tf.trainable_variables: returns all parameters that need to be optimized (see the sketch after this block).

A variable's shape is fixed once it is declared; to assign it a value of a different shape, set validate_shape=False:

w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([2, 2], stddev=1, seed=1))
# tf.assign(w1, w2)  # wrong: the shapes do not match
tf.assign(w1, w2, validate_shape=False)
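Whether a variable appears in tf.trainable_variables() is controlled by its trainable argument. A minimal sketch (the variable names are my own):

v1 = tf.Variable(tf.zeros([1]), name='v1')                   # trainable by default
v2 = tf.Variable(tf.zeros([1]), name='v2', trainable=False)  # excluded from optimization
print([v.name for v in tf.trainable_variables()])  # contains v1 but not v2
print([v.name for v in tf.global_variables()])     # contains both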
# Forward propagation implemented with a placeholder
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

# The input is an n*2 matrix and the forward-propagation result is an n*1 matrix.
# A placeholder's shape can be inferred from the data that is fed in,
# so it does not have to be specified.
x = tf.placeholder(tf.float32, shape=(3, 2), name='input')
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)
# print(sess.run(y))  # error: a required placeholder has not been fed a value
print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))  # feed a value for x
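Leaving the batch dimension unspecified lets one graph handle batches of any size. A small sketch reusing w1, w2, and sess from above (x2 is my own name):

x2 = tf.placeholder(tf.float32, shape=(None, 2), name='input2')
y2 = tf.matmul(tf.matmul(x2, w1), w2)
print(sess.run(y2, feed_dict={x2: [[0.7, 0.9]]}))              # batch of 1
print(sess.run(y2, feed_dict={x2: [[0.7, 0.9], [0.1, 0.4]]}))  # batch of 2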
import tensorflow as tf
from numpy.random import RandomState

batch_size = 8
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

# define the loss function and the backpropagation algorithm
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

# generate a simulated dataset from random numbers
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

# create a session
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print(sess.run(w1))
    print(sess.run(w2))
    STEPS = 5000  # number of training iterations
    for i in range(STEPS):
        # pick batch_size samples for each training step
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        # train the network on these samples and update the parameters
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            # every 1000 steps, compute the cross entropy on the full dataset
            total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training step(s), cross entropy on all data is %g" %
                  (i, total_cross_entropy))
    print(sess.run(w1))
    print(sess.run(w2))
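Once training finishes, the same y op produces predictions for new inputs; the lines below are meant to run inside the with-block above, before the session closes. The sample point is my own illustration:

    # x1 + x2 = 0.3 + 0.4 < 1, so this sample's true label is 1;
    # an output close to 1 means the trained network agrees
    print(sess.run(y, feed_dict={x: [[0.3, 0.4]]}))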