import tensorflow as tf

# Two constant matrices: 1x2 and 2x1.
m1 = tf.constant([[3, 3]])
m2 = tf.constant([[2], [3]])

# Matrix-product op (1x1 result). Printing the op itself only shows
# tensor metadata — the value is produced by running it in a session.
product = tf.matmul(m1, m2)
print(product)

# Run the graph with an explicitly managed session.
sess = tf.Session()
result = sess.run(product)
print(result)
sess.close()

# Same computation, with the session closed automatically by the
# context manager.
with tf.Session() as sess:
    result = sess.run(product)
    print(result)
輸出結果:
[[15]]
import tensorflow as tf

x = tf.Variable([1, 2])   # a variable
a = tf.constant([3, 3])   # a constant

sub = tf.subtract(x, a)   # subtraction op
add = tf.add(x, sub)      # addition op

# Variables must be initialized before any run that reads them.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(sub))
    print(sess.run(add))

# A counter variable, named 'counter', starting at 0.
state = tf.Variable(0, name='counter')
new_value = tf.add(state, 1)          # op computing state + 1
update = tf.assign(state, new_value)  # op assigning the new value back into state

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    # Each run of `update` increments the counter by one.
    for _ in range(5):
        sess.run(update)
        print(sess.run(state))
輸出結果:
[-2 -1]
[-1 1]
0
1
2
3
4
5
import tensorflow as tf

# --- Fetch: run several ops in a single sess.run call ---
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)

add = tf.add(input2, input3)       # 2.0 + 5.0
mul = tf.multiply(input1, add)     # 3.0 * (2.0 + 5.0)

with tf.Session() as sess:
    # Fetching a list returns the results of both ops at once.
    result = sess.run([mul, add])
    print(result)

# --- Feed: supply placeholder values at run time ---
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    # Feed data as a dict mapping placeholders to concrete values.
    print(sess.run(output, feed_dict={input1: [8.], input2: [2.]}))
輸出結果:
[21.0, 7.0]
[ 16.]
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST; one_hot=True encodes each label as a 0/1 vector.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100                                   # images per training batch
n_batch = mnist.train.num_examples // batch_size   # full batches per epoch

# Placeholders for flattened 28x28 images and their 10-class labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# A single softmax layer — no hidden layers.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Quadratic (MSE) cost, minimized with plain gradient descent (lr=0.2).
loss = tf.reduce_mean(tf.square(y - prediction))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# argmax gives the index of the largest entry along axis 1, so this
# compares the predicted class against the true class per example.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        # Evaluate on the full test set after each epoch.
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
輸出結果:
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
Iter 0,Testing Accuracy 0.8302
Iter 1,Testing Accuracy 0.8711
Iter 2,Testing Accuracy 0.8821
Iter 3,Testing Accuracy 0.8881
Iter 4,Testing Accuracy 0.8944
Iter 5,Testing Accuracy 0.8965
Iter 6,Testing Accuracy 0.8997
Iter 7,Testing Accuracy 0.9005
Iter 8,Testing Accuracy 0.9039
Iter 9,Testing Accuracy 0.9046
Iter 10,Testing Accuracy 0.9061
Iter 11,Testing Accuracy 0.9071
Iter 12,Testing Accuracy 0.9073
Iter 13,Testing Accuracy 0.9089
Iter 14,Testing Accuracy 0.9101
Iter 15,Testing Accuracy 0.9106
Iter 16,Testing Accuracy 0.9118
Iter 17,Testing Accuracy 0.9124
Iter 18,Testing Accuracy 0.9127
Iter 19,Testing Accuracy 0.9126
Iter 20,Testing Accuracy 0.9139