```python
# coding=utf-8
import tensorflow as tf

with tf.variable_scope('V1') as scope:
    a1 = tf.get_variable(name='a1', shape=[1],
                         initializer=tf.constant_initializer(1))
    scope.reuse_variables()      # allow re-fetching variables in this scope
    a3 = tf.get_variable('a1')   # returns the same variable as a1

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(a1.name)
    print(sess.run(a1))
    print(a3.name)
    print(sess.run(a3))
```
This is equivalent to:
```python
with tf.variable_scope('V1'):
    a1 = tf.get_variable(name='a1', shape=[1],
                         initializer=tf.constant_initializer(1))
with tf.variable_scope('V1', reuse=True):
    a3 = tf.get_variable('a1')   # reuses the variable 'V1/a1'

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(a1.name)
    print(sess.run(a1))
    print(a3.name)
    print(sess.run(a3))
```
Any arithmetic operation between Tensors of the same shape is applied element-wise.
```python
# Arithmetic operators: + - * / %
tf.add(x, y, name=None)        # addition (supports broadcasting)
tf.subtract(x, y, name=None)   # subtraction
tf.multiply(x, y, name=None)   # multiplication
tf.divide(x, y, name=None)     # floating-point division, returns a float (Python 3 division)
tf.mod(x, y, name=None)        # modulo

# Power, exponential and logarithm operators: ^  ^2  ^0.5  e^  ln
tf.pow(x, y, name=None)        # x raised to the power y
tf.square(x, name=None)        # square
tf.sqrt(x, name=None)          # square root; input must be float or complex
tf.exp(x, name=None)           # e raised to the power x
tf.log(x, name=None)           # natural logarithm; input must be float or complex

# Sign, negation, reciprocal, absolute value, rounding, max/min of two tensors
tf.negative(x, name=None)      # negation (y = -x)
tf.sign(x, name=None)          # sign of x
tf.reciprocal(x, name=None)    # reciprocal
tf.abs(x, name=None)           # absolute value
tf.round(x, name=None)         # round to nearest
tf.ceil(x, name=None)          # round up
tf.floor(x, name=None)         # round down
tf.rint(x, name=None)          # round to the closest integer
tf.maximum(x, y, name=None)    # element-wise maximum (x > y ? x : y)
tf.minimum(x, y, name=None)    # element-wise minimum (x < y ? x : y)

# Trigonometric and inverse trigonometric functions
tf.cos(x, name=None)
tf.sin(x, name=None)
tf.tan(x, name=None)
tf.acos(x, name=None)
tf.asin(x, name=None)
tf.atan(x, name=None)

# Others
tf.div(x, y, name=None)        # Python 2.7 division, x/y --> int or x/float(y) --> float
tf.truediv(x, y, name=None)    # Python 3 division, x/y --> float
tf.floordiv(x, y, name=None)   # Python 3 division, x//y --> int
tf.realdiv(x, y, name=None)
tf.truncatediv(x, y, name=None)
tf.floor_div(x, y, name=None)
tf.truncatemod(x, y, name=None)
tf.floormod(x, y, name=None)
tf.cross(x, y, name=None)
tf.add_n(inputs, name=None)    # inputs: a list of Tensor objects, each with the same shape and type
tf.squared_difference(x, y, name=None)
```
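For a quick sense of the element-wise semantics and broadcasting, here is a minimal TF 1.x sketch (the values are made up for illustration):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([10., 20.])  # broadcast across both rows

with tf.Session() as sess:
    print(sess.run(tf.add(x, y)))       # [[11. 22.] [13. 24.]]
    print(sess.run(tf.multiply(x, x)))  # [[ 1.  4.] [ 9. 16.]]
    print(sess.run(tf.sqrt(x)))         # element-wise square root
```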
```python
# Matrix multiplication (tensors of rank >= 2)
tf.matmul(a, b, transpose_a=False, transpose_b=False,
          adjoint_a=False, adjoint_b=False,
          a_is_sparse=False, b_is_sparse=False, name=None)

# Transpose; axes can be permuted by passing e.g. perm=[1, 0]
tf.transpose(a, perm=None, name='transpose')

# Transpose the last two dimensions of tensor a
tf.matrix_transpose(a, name='matrix_transpose')
# Matrix with two batch dimensions, x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]

# Trace of a matrix
tf.trace(x, name=None)

# Determinant of a square matrix
tf.matrix_determinant(input, name=None)

# Inverse of an invertible square matrix; input must be float or complex
tf.matrix_inverse(input, adjoint=None, name=None)

# Singular value decomposition
tf.svd(tensor, full_matrices=False, compute_uv=True, name=None)

# QR decomposition
tf.qr(input, full_matrices=None, name=None)

# Norm of a tensor (defaults to the 2-norm)
tf.norm(tensor, ord='euclidean', axis=None, keep_dims=False, name=None)

# Build one identity matrix, or a batch of them; batch_shape is passed as a list
tf.eye(num_rows, num_columns=None, batch_shape=None, dtype=tf.float32, name=None)
# Construct one identity matrix.
tf.eye(2) ==> [[1., 0.],
               [0., 1.]]
# Construct a batch of 3 identity matrices, each 2 x 2.
# batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
batch_identity = tf.eye(2, batch_shape=[3])
# Construct one 2 x 3 "identity" matrix
tf.eye(2, num_columns=3) ==> [[1., 0., 0.],
                              [0., 1., 0.]]

# Build a diagonal matrix, rank = 2 * rank(diagonal)
tf.diag(diagonal, name=None)
# 'diagonal' is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0],
                       [0, 2, 0, 0],
                       [0, 0, 3, 0],
                       [0, 0, 0, 4]]

# Others
tf.diag_part
tf.matrix_diag
tf.matrix_diag_part
tf.matrix_band_part
tf.matrix_set_diag
tf.cholesky
tf.cholesky_solve
tf.matrix_solve
tf.matrix_triangular_solve
tf.matrix_solve_ls
tf.self_adjoint_eig
tf.self_adjoint_eigvals
```
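A minimal runnable sketch of a few of these matrix ops, assuming the TF 1.x Session API (the matrices are illustrative):

```python
import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 6.], [7., 8.]])

with tf.Session() as sess:
    print(sess.run(tf.matmul(a, b)))           # [[19. 22.] [43. 50.]]
    print(sess.run(tf.transpose(a)))           # [[1. 3.] [2. 4.]]
    print(sess.run(tf.matrix_determinant(a)))  # -2.0
    print(sess.run(tf.matrix_inverse(a)))      # [[-2.   1. ] [ 1.5 -0.5]]
```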
```python
# Sum all elements of the input tensor, or sum along the given axes
tf.reduce_sum(input_tensor, axis=None, keep_dims=False, name=None)
# 'x' is [[1, 1, 1],
#         [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]  # dimensions are not reduced
tf.reduce_sum(x, [0, 1]) ==> 6

# Mean / max / min / product / logical-and / logical-or of all elements,
# or along the given axes (just like reduce_sum)
tf.reduce_mean(input_tensor, axis=None, keep_dims=False, name=None)
tf.reduce_max(input_tensor, axis=None, keep_dims=False, name=None)
tf.reduce_min(input_tensor, axis=None, keep_dims=False, name=None)
tf.reduce_prod(input_tensor, axis=None, keep_dims=False, name=None)
tf.reduce_all(input_tensor, axis=None, keep_dims=False, name=None)  # True if all elements satisfy the condition
tf.reduce_any(input_tensor, axis=None, keep_dims=False, name=None)  # True if at least one element satisfies the condition

# -------------------------------------------
# Everything above this divider matches the corresponding NumPy usage exactly
# -------------------------------------------

# inputs is a list; computes the cumulative sum of all of its elements.
# tf.add(x, y, name=None) can only add two tensors; this function extends it.
tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)

# Computes log(sum(exp(elements across dimensions of a tensor)))
tf.reduce_logsumexp(input_tensor, axis=None, keep_dims=False, name=None)

# Computes number of nonzero elements across dimensions of a tensor
tf.count_nonzero(input_tensor, axis=None, keep_dims=False, name=None)
```
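A small sketch of the less common reductions, with made-up input (the printed values follow from the definitions above):

```python
import tensorflow as tf

x = tf.constant([[1., 1., 0.], [0., 1., 1.]])

with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(x)))           # 0.6666667
    print(sess.run(tf.count_nonzero(x)))         # 4
    print(sess.run(tf.accumulate_n([x, x, x])))  # element-wise x + x + x
```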
```python
# Compute the cumulative sum of the tensor x along axis
tf.cumsum(x, axis=0, exclusive=False, reverse=False, name=None)
# Eg:
tf.cumsum([a, b, c])                                # => [a, a + b, a + b + c]
tf.cumsum([a, b, c], exclusive=True)                # => [0, a, a + b]
tf.cumsum([a, b, c], reverse=True)                  # => [a + b + c, b + c, c]
tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]

# Compute the cumulative product of the tensor x along axis
tf.cumprod(x, axis=0, exclusive=False, reverse=False, name=None)
```
```python
# Computes the sum/mean/max/min/prod along segments of a tensor
tf.segment_sum(data, segment_ids, name=None)
# Eg (s is a tf.Session()):
m = tf.constant([5, 1, 7, 2, 3, 4, 1, 3])
s_id = [0, 0, 0, 1, 2, 2, 3, 3]
s.run(tf.segment_sum(m, segment_ids=s_id))
# => array([13,  2,  7,  4], dtype=int32)

tf.segment_mean(data, segment_ids, name=None)
tf.segment_max(data, segment_ids, name=None)
tf.segment_min(data, segment_ids, name=None)
tf.segment_prod(data, segment_ids, name=None)

# Others
tf.unsorted_segment_sum
tf.sparse_segment_sum
tf.sparse_segment_mean
tf.sparse_segment_sqrt_n
```
```python
tf.split(value, num_or_size_splits, axis=0, num=None, name='split')
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0)  # [5, 4]
tf.shape(split1)  # [5, 15]
tf.shape(split2)  # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0)  # [5, 10]

tf.slice(input_, begin, size, name=None)
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                 [[3, 3, 3], [4, 4, 4]],
                 [[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3])  # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3])  # [[[3, 3, 3],
                                   #   [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3])  # [[[3, 3, 3]],
                                   #  [[5, 5, 5]]]
```
```python
# Compare two lists or strings, returning the differing values and their indices
tf.setdiff1d(x, y, index_dtype=tf.int32, name=None)

# Returns a tensor of the unique values in x, plus the index of each element
# of the original tensor within the new tensor
tf.unique(x, out_idx=None, name=None)

# x if condition else y; condition is a bool tensor, e.g. built with tf.equal();
# x and y must have the same shape and dtype
tf.where(condition, x=None, y=None, name=None)

# Index of the maximum / minimum value along an axis
tf.argmax(input, axis=None, name=None, output_type=tf.int64)
tf.argmin(input, axis=None, name=None, output_type=tf.int64)

# Treats the values of x as indices into y and range(len(x)) as the values:
# y[x[i]] = i for i in [0, 1, ..., len(x) - 1]
tf.invert_permutation(x, name=None)

# Others
tf.edit_distance
```
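A short sketch showing tf.unique, tf.where and tf.argmax together, with an illustrative input tensor:

```python
import tensorflow as tf

x = tf.constant([1, 3, 3, 7, 1])

with tf.Session() as sess:
    vals, idx = sess.run(tf.unique(x))
    print(vals, idx)  # [1 3 7] [0 1 1 2 0]
    # keep elements > 2, zero out the rest
    print(sess.run(tf.where(tf.greater(x, 2), x, tf.zeros_like(x))))  # [0 3 3 7 0]
    print(sess.run(tf.argmax(x)))  # 3 (index of the 7)
```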
tf.concat joins a list of tensors along a given dimension, much like numpy's concatenate. Example from the official docs:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]

# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
```
Plain Python lists work as inputs too, as long as they are tensor-like; tf.concat still returns a Tensor.
tf.gather is similar to array indexing: it extracts the elements at the given indices from a tensor. It gathers along a single axis only (axis 0 by default), so each index selects a whole slice.
```python
import tensorflow as tf

a = tf.Variable([[1, 2, 3, 4, 5],
                 [6, 7, 8, 9, 10],
                 [11, 12, 13, 14, 15]])
index_a = tf.Variable([0, 2])

b = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
index_b = tf.Variable([2, 4, 6, 8])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.gather(a, index_a)))
    print(sess.run(tf.gather(b, index_b)))

# [[ 1  2  3  4  5]
#  [11 12 13 14 15]]
# [3 5 7 9]
```
tf.gather_nd is similar, but allows indexing across multiple dimensions.
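A minimal sketch, reusing the tensor from the tf.gather example above: each inner index in `indices` is a full [row, column] coordinate.

```python
import tensorflow as tf

a = tf.constant([[1, 2, 3, 4, 5],
                 [6, 7, 8, 9, 10],
                 [11, 12, 13, 14, 15]])
# Each inner list is a complete [row, column] index
indices = tf.constant([[0, 0], [1, 2], [2, 4]])

with tf.Session() as sess:
    print(sess.run(tf.gather_nd(a, indices)))  # [ 1  8 15]
```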
Comparison functions. The tensors x and y must have the same shape, and tf.greater(x, y) produces a bool tensor of that same shape: wherever an element of x is greater than the corresponding element of y, the result is True, otherwise False. tf.less and tf.greater_equal behave analogously.
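A minimal sketch of these comparison ops (illustrative values):

```python
import tensorflow as tf

x = tf.constant([1, 5, 3])
y = tf.constant([4, 2, 3])

with tf.Session() as sess:
    print(sess.run(tf.greater(x, y)))        # [False  True False]
    print(sess.run(tf.less(x, y)))           # [ True False False]
    print(sess.run(tf.greater_equal(x, y)))  # [False  True  True]
```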
tf.cast converts a tensor to a different dtype:

```python
a = tf.constant([0, 2, 0, 4, 2, 2], dtype='int32')
print(a)
# <tf.Tensor 'Const_1:0' shape=(6,) dtype=int32>

b = tf.cast(a, 'float32')
print(b)
# <tf.Tensor 'Cast:0' shape=(6,) dtype=float32>
```
Adding / squeezing dimensions of a tensor.
```python
a = tf.constant([0, 2, 0, 4, 2, 2], dtype='int32')
print(a)
# <tf.Tensor 'Const_1:0' shape=(6,) dtype=int32>

b = tf.expand_dims(a, 0)
print(b)
# <tf.Tensor 'ExpandDims:0' shape=(1, 6) dtype=int32>

print(tf.squeeze(b, 0))
# <tf.Tensor 'Squeeze:0' shape=(6,) dtype=int32>
```
As long as a tensor's size is known in advance, its entries can be traversed with ordinary Python loops.
```python
import tensorflow as tf

data = tf.constant([[1, 2, 3], [4, 5, 6]])
aa = data * 1
size = aa.get_shape()  # static shape (2, 3), known at graph-construction time

# Traverse every entry of the 2 x 3 tensor with plain Python loops
total = tf.convert_to_tensor(0)
for i in range(2):
    for j in range(3):
        total = total + data[i][j]

with tf.Session() as sess:
    print(size)             # (2, 3) -- a static shape, no session needed
    print(sess.run(total))  # 21
```
But when the tensor's size is only determined at run time (for example, input images of different sizes, or a varying number of bounding boxes), the iteration count cannot be fixed when the graph is defined; in that case you must use tf.while_loop.
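A minimal tf.while_loop sketch, assuming a placeholder whose row count is unknown at graph-construction time (the placeholder name and shapes here are illustrative, not from the original):

```python
import tensorflow as tf

# Row count is unknown when the graph is built: shape (None, 3)
x = tf.placeholder(tf.float32, shape=[None, 3])
n = tf.shape(x)[0]  # dynamic row count, only known at run time

def cond(i, acc):
    return i < n

def body(i, acc):
    return i + 1, acc + x[i]  # accumulate row i

# Sum the rows of x, however many there turn out to be
_, total = tf.while_loop(cond, body, [tf.constant(0), tf.zeros([3])])

with tf.Session() as sess:
    print(sess.run(total, feed_dict={x: [[1, 2, 3], [4, 5, 6]]}))  # [5. 7. 9.]
```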