At work I have been using convolutional methods for natural language processing (NLP) tasks, which involve several TensorFlow functions and methods.
Each is introduced below.
Function signature: tf.nn.conv1d(value, filters, stride, padding, use_cudnn_on_gpu=None, data_format=None, name=None)
Example program:
import tensorflow as tf
import numpy as np

sess = tf.InteractiveSession()
# --------------- tf.nn.conv1d -------------------
inputs = tf.ones((64, 10, 3))                # [batch, n_sqs, embedsize]
w = tf.constant(1, tf.float32, (5, 3, 32))   # [w_high, embedsize, n_filters]
conv1 = tf.nn.conv1d(inputs, w, stride=2, padding='SAME')
# conv1: [batch, ceil(n_sqs/stride), n_filters]; stride is the step size
tf.global_variables_initializer().run()
out = sess.run(conv1)
print(out)
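Internally, tf.nn.conv1d reshapes its arguments and invokes the 2-D convolution. As a sanity check (my addition, continuing the snippet above), the same numbers come out of tf.nn.conv2d on a height-1 input:

# Sketch: tf.nn.conv1d as a height-1 tf.nn.conv2d (reuses inputs, w, conv1, sess from above).
inputs_2d = tf.expand_dims(inputs, 1)       # [64, 1, 10, 3]
w_2d = tf.expand_dims(w, 0)                 # [1, 5, 3, 32]
conv2d_out = tf.nn.conv2d(inputs_2d, w_2d, strides=[1, 1, 2, 1], padding='SAME')
conv2d_out = tf.squeeze(conv2d_out, 1)      # back to [64, 5, 32]
print(np.allclose(sess.run(conv1), sess.run(conv2d_out)))  # True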
Function signature: tf.layers.conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, ...)
Example program:
import tensorflow as tf
import numpy as np

sess = tf.InteractiveSession()
# --------------- tf.layers.conv1d -------------------
inputs = tf.ones((64, 10, 3))  # [batch, n_sqs, embedsize]
num_filters = 32
kernel_size = 5
conv2 = tf.layers.conv1d(inputs, num_filters, kernel_size, strides=2,
                         padding='valid', name='conv2')
# shape = (batch_size, ceil((n_sqs - kernel_size + 1) / strides), num_filters) for 'valid' padding
tf.global_variables_initializer().run()
out = sess.run(conv2)
print(out)
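To see the padding difference concretely, the sketch below (my addition, reusing inputs, num_filters, and kernel_size from the example above) compares the 'valid' and 'same' output lengths:

# Sketch: 'valid' vs 'same' output lengths for tf.layers.conv1d.
# 'valid': no padding  -> length = ceil((10 - 5 + 1) / 2) = 3
# 'same' : zero-padded -> length = ceil(10 / 2) = 5
conv_valid = tf.layers.conv1d(inputs, num_filters, kernel_size, strides=2,
                              padding='valid', name='conv_valid')
conv_same = tf.layers.conv1d(inputs, num_filters, kernel_size, strides=2,
                             padding='same', name='conv_same')
print(conv_valid.get_shape().as_list())  # [64, 3, 32]
print(conv_same.get_shape().as_list())   # [64, 5, 32]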
A 1-D sequence can also be convolved with tf.nn.conv2d by treating it as a height-1 image:

import tensorflow as tf

sess = tf.InteractiveSession()

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def avg_pool_1x2(x):
    return tf.nn.avg_pool(x, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')
'''
ksize   = [x, pool_height, pool_width, x]
strides = [x, pool_height, pool_width, x]
'''
x = tf.Variable([[1, 2, 3, 4]], dtype=tf.float32)
x = tf.reshape(x, [1, 1, 4, 1])  # This reshape is required; otherwise TensorFlow reports mismatched dimensions.
'''
[batch, in_height, in_width, in_channels] = [1, 1, 4, 1]
'''
W_conv1 = tf.Variable([1, 2, 1], dtype=tf.float32)  # kernel weights
W_conv1 = tf.reshape(W_conv1, [1, 3, 1, 1])         # likewise required
'''
[filter_height, filter_width, in_channels, out_channels]
'''
h_conv1 = conv2d(x, W_conv1)     # result: [4, 8, 12, 11]
h_pool1 = avg_pool_1x2(h_conv1)  # result: [6, 11.5]

tf.global_variables_initializer().run()
print(sess.run(h_conv1))  # [4, 8, 12, 11]
print(sess.run(h_pool1))  # [6, 11.5]
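For contrast with the average pooling above, swapping in tf.nn.max_pool over the same 1x2 windows keeps the larger value of each pair (a small addition, continuing the snippet above):

# Sketch: max pooling over the same 1x2 windows.
h_max = tf.nn.max_pool(h_conv1, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')
print(sess.run(h_max))  # [8, 12] -- max of (4, 8) and max of (12, 11)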
Two pooling strategies applied to a conv output of shape [?, 596, 256] (here self.config.num_filters = 256):

# 1: strided max pooling
convs = tf.expand_dims(conv, axis=-1)  # shape = [?, 596, 256, 1]
smp = tf.nn.max_pool(value=convs, ksize=[1, 3, self.config.num_filters, 1],
                     strides=[1, 3, 1, 1], padding='SAME')  # shape = [?, 199, 256, 1]
smp = tf.squeeze(smp, -1)  # shape = [?, 199, 256]
smp = tf.reshape(smp, shape=(-1, 199 * self.config.num_filters))

# 2: global max pooling layer
gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')
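The snippet above comes from inside a model class, so it is not runnable on its own. Here is a self-contained sketch under assumed shapes (batch size 8, num_filters = 256 standing in for self.config.num_filters) that verifies the shape comments:

import tensorflow as tf

sess = tf.InteractiveSession()
num_filters = 256                      # assumed value of self.config.num_filters
conv = tf.ones((8, 596, num_filters))  # dummy conv output: [batch, length, filters]

# 1: strided max pooling: [8, 596, 256] -> [8, 199, 256] -> [8, 199 * 256]
convs = tf.expand_dims(conv, axis=-1)
smp = tf.nn.max_pool(value=convs, ksize=[1, 3, num_filters, 1],
                     strides=[1, 3, 1, 1], padding='SAME')
smp = tf.squeeze(smp, -1)
smp = tf.reshape(smp, shape=(-1, 199 * num_filters))

# 2: global max pooling: [8, 596, 256] -> [8, 256]
gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')

print(smp.get_shape().as_list())  # [8, 50944]
print(gmp.get_shape().as_list())  # [8, 256]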
kernel_sizes = [3, 4, 5]  # convolution kernels with window sizes 3, 4, and 5
with tf.name_scope("mul_cnn"):
    pooled_outputs = []
    for kernel_size in kernel_sizes:
        # CNN layer
        conv = tf.layers.conv1d(embedding_inputs, self.config.num_filters, kernel_size,
                                name='conv-%s' % kernel_size)
        # global max pooling layer
        gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')
        pooled_outputs.append(gmp)
    self.h_pool = tf.concat(pooled_outputs, 1)  # concatenate the pooled outputs
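As a shape check, here is a standalone sketch of the multi-kernel branch under assumed dimensions (dummy embeddings of shape [64, 600, 100], num_filters = 256 in place of self.config.num_filters). Each global-max-pooled branch is [64, 256], so the concatenation is [64, 768]:

import tensorflow as tf

sess = tf.InteractiveSession()
num_filters = 256                           # assumed value of self.config.num_filters
embedding_inputs = tf.ones((64, 600, 100))  # dummy embeddings: [batch, seq_len, embed_dim]

pooled_outputs = []
with tf.name_scope("mul_cnn"):
    for kernel_size in [3, 4, 5]:
        conv = tf.layers.conv1d(embedding_inputs, num_filters, kernel_size,
                                name='conv-%s' % kernel_size)
        gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')  # [64, 256]
        pooled_outputs.append(gmp)
h_pool = tf.concat(pooled_outputs, 1)
print(h_pool.get_shape().as_list())  # [64, 768]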