With two hidden layers, the network fits the data noticeably better than with one layer (for comparison, the one-layer wiring is sketched just below).
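A minimal sketch of the one-hidden-layer variant, assuming the add_layer helper and the x placeholder defined in the full script further down; swapping these two lines in for the layer1/layer2/output_layer block there reproduces the weaker one-layer fit:

# one-hidden-layer variant (sketch): replaces the layer1/layer2/output_layer
# block of the full script below
layer1 = add_layer(x, 1, 10, activation_function=tf.nn.relu)
output_layer = add_layer(layer1, 10, 1, activation_function=None)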
The following warning also appears:
C:\Program Files\Python36\lib\site-packages\matplotlib\backend_bases.py:2453: MatplotlibDeprecationWarning: Using default event loop until function specific to this GUI is implemented
  warnings.warn(str, mplDeprecation)
Occasionally it draws a straight line instead, and I don't know why.
The higher the learning rate, or the more layers the network has, the more likely it is to either draw a straight line or stall outright. I hope an answer turns up in the comments.
Cause of the stalls: NaN. With a learning rate this high, gradient descent can diverge: the weights overflow, the loss becomes NaN, and every output computed afterwards is NaN as well.
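A minimal NumPy sketch (mine, not part of the original script) of why an overly large learning rate produces NaN: gradient descent on f(w) = w^2 diverges as soon as lr exceeds 1, and the overflowing values then turn into NaN:

import numpy as np

# gradient descent on f(w) = w^2, whose gradient is 2w
for lr in (0.1, 1.5):
    w = np.float32(1.0)
    for step in range(200):
        grad = 2 * w       # f'(w) = 2w
        w = w - lr * grad  # with lr=1.5 this is w -> -2w: |w| doubles every step
    print('lr = %.1f  ->  w = %s' % (lr, w))
# lr = 0.1 converges to ~0; lr = 1.5 overflows float32 to inf around step 128,
# and the next update then computes inf - inf, which is NaN -- the same
# failure mode as the stalled training runs above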
Straight line: cause unknown? The NN has learned the wrong thing. (My guess: dead ReLU units; see the diagnostic sketch after the script below.)
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# reference: http://blog.csdn.net/jacke121/article/details/74938031

'''
numpy.linspace test

import numpy as np
# list_random1 = np.linspace(-1, 1, 300)
# list_random2 = np.linspace(-1, 1, 300)[:, np.newaxis]
# print(list_random1)
# print(list_random2)
# print(np.shape(list_random1), np.shape(list_random2))  # (300,) (300, 1)

x_data = np.linspace(-1, 1, 10)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

x_data2 = tf.random_uniform([10, 1], -1, 1)
y_data2 = tf.square(x_data2) - tf.random_normal(x_data2.shape, 2)

print(x_data, '\n\n', x_data.shape, '\n\n')  # (10, 1)
print(y_data, '\n\n', y_data.shape, '\n\n')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(x_data2))
    print(x_data2)  # Tensor("random_uniform:0", shape=(10, 1), dtype=float32)
    print(sess.run(y_data2))
    print(y_data2)
'''

# add layer function
def add_layer(inputs, input_size, output_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([input_size, output_size]))
    biases = tf.Variable(tf.zeros([1, output_size]) + 0.1)
    outputs = tf.matmul(inputs, Weights) + biases
    if activation_function is not None:
        outputs = activation_function(outputs)
    return outputs

# create data
# x_data: 100 evenly spaced points in [-1, 1], shape = [100, 1]
# y_data: x^2 - 0.5 plus Gaussian noise (stddev 0.05), shape = [100, 1]
x_data = np.linspace(-1, 1, 100)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# define placeholders for the inputs of the nn
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# hidden layer 1
layer1 = add_layer(x, 1, 10, activation_function=tf.nn.relu)
# hidden layer 2
layer2 = add_layer(layer1, 10, 10, activation_function=tf.nn.relu)
# output layer
output_layer = add_layer(layer2, 10, 1, activation_function=None)

# define loss for nn: per-example squared error, averaged over the batch
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - output_layer),
                                    reduction_indices=[1]))
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# visualize the result
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()
plt.show()

with tf.Session() as sess:
    # tf.initialize_all_variables() is deprecated; use the current name
    sess.run(tf.global_variables_initializer())
    for i in range(1, 500):
        sess.run(train, feed_dict={x: x_data, y: y_data})
        # redraw the fitted curve every 20 steps
        if i % 20 == 0:
            try:
                ax.lines.remove(lines[0])  # lines is undefined on the first redraw
            except Exception:
                pass
            output = sess.run(output_layer, feed_dict={x: x_data})
            lines = ax.plot(x_data, output, 'r-', lw=5)
            plt.pause(0.1)
    plt.pause(100)  # keep the final figure on screen
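As for the straight lines, my guess (an assumption, not something the script verifies) is dead ReLU units: if every unit in a hidden layer outputs 0 for all 100 inputs, the network's output no longer depends on x, so the plotted curve collapses to a horizontal line. A diagnostic sketch, meant to be pasted inside the with tf.Session() block above so it can reuse sess, x, x_data, layer1, and layer2; the helper name count_dead_units is mine:

# count hidden units whose ReLU output is 0 for every input in the batch
def count_dead_units(layer_tensor):
    acts = sess.run(layer_tensor, feed_dict={x: x_data})  # shape [100, 10]
    return int(np.sum(np.all(acts == 0, axis=0)))

print('dead units in layer1: %d / 10' % count_dead_units(layer1))
print('dead units in layer2: %d / 10' % count_dead_units(layer2))
# if a whole layer is dead, only the downstream biases reach the output,
# i.e. the prediction is the same constant for every x: a flat line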