TensorFlow Learning Notes (7): TensorBoard

  To make it easier to manage, debug, and optimize the training of neural networks, TensorFlow provides a visualization tool called TensorBoard. TensorBoard can effectively display the computation graph of a running TensorFlow program, the trends of various metrics over time, and information such as the images used during training.

   1. Introduction to TensorBoard

  2. Visualizing the Computation Graph in TensorBoard

    1) Name scopes and nodes on the TensorBoard graph

    2) Node information

    3) Visualizing monitoring metrics

   1. Introduction to TensorBoard

   TensorBoard is TensorFlow's visualization tool. It visualizes the running state of a TensorFlow program from the log files that the program writes out while it runs. TensorBoard and TensorFlow run in separate processes: TensorBoard automatically reads the latest TensorFlow log files and presents the program's most recent state.

  

import tensorflow as tf

# Define a simple computation graph that adds two vectors
input1 = tf.constant([1.0, 2.0, 3.0], name="input1")
input2 = tf.Variable(tf.random_uniform([3]), name="input2")
output = tf.add_n([input1, input2], name="output")
# Create a summary writer and write the current computation graph to the log
writer = tf.summary.FileWriter("path/to/log", graph=tf.get_default_graph())
writer.close()

Start TensorBoard with the command tensorboard --logdir=path/to/log, then open http://localhost:6006 (the default port) in a browser to view the visualization.

 

 

 

  2. Visualizing the Computation Graph in TensorBoard

    1) Name scopes and nodes on the TensorBoard graph

   To organize the nodes in the visualization more clearly, TensorBoard groups the nodes on the graph according to TensorFlow name scopes. TensorFlow provides two scoping functions, tf.variable_scope and tf.name_scope. They are essentially equivalent; the only difference is how they interact with tf.get_variable.

  

import tensorflow as tf

with tf.variable_scope("foo"):
    # Under the variable scope "foo", get the variable "bar"; its full name is "foo/bar"
    a = tf.get_variable("bar", [1])
    print(a.name)

with tf.variable_scope("bar"):
    # Under the variable scope "bar", get the variable "bar"; its full name is "bar/bar",
    # which does not conflict with "foo/bar"
    b = tf.get_variable("bar", [1])
    print(b.name)

with tf.name_scope("a"):
    # tf.Variable is affected by tf.name_scope; the variable name is "a/Variable:0"
    a = tf.Variable([1])
    print(a.name)
    # tf.get_variable is not affected by tf.name_scope; the variable name is "b:0",
    # without the name_scope prefix
    b = tf.get_variable("b", [1])
    print(b.name)
with tf.name_scope("b"):
    # tf.Variable is still affected by tf.name_scope; because the name "b" is already
    # taken by the variable above, the scope is uniquified, e.g. "b_1/Variable:0"
    a = tf.Variable([1])
    print(a.name)
    # tf.get_variable ignores tf.name_scope, so this tries to declare "b" again
    # and raises an error because that variable already exists
    b = tf.get_variable("b", [1])
    print(b.name)
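
For reference, running this script under TensorFlow 1.x should print variable names along the following lines, after which the final tf.get_variable("b",[1]) raises a ValueError because the variable "b" has already been declared (the last scope appears as something like b_1 because the name "b" is already in use):

foo/bar:0
bar/bar:0
a/Variable:0
b:0
b_1/Variable:0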

An improved version of the example code from the previous section:

import tensorflow as tf

with tf.name_scope("input1"):
    input1 = tf.constant([1.0, 2.0, 3.0], name="input1")
with tf.name_scope("input2"):
    input2 = tf.Variable(tf.random_uniform([3]), name="input2")
output = tf.add_n([input1, input2], name="add")

writer = tf.summary.FileWriter('path/to/log', tf.get_default_graph())
writer.close()

Visualizing the example program from TensorFlow Learning Notes (5):

# -*- coding:utf-8 -*-
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the variables and functions defined in mnsit_inference1 (from the earlier note)
from integerad_mnist import mnsit_inference1
import numpy as np

# Hyper-parameters of the neural network
BATCH_SIZE = 100
LR_BASE = 0.8
LR_DECAY = 0.99
REGULARAZTION_RATE = 0.0001
TRANING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Path and file name for saving the model
MODEL_SAVE_PATH = "path/to/model/"
MODEL_SAVE_NAME = "model.ckpt"


INPUT_NODE = 784
OUTPUT_NODE =10
LAYER_NODE = 500

def train(mnsit):
    # Define the input and output placeholders and put the input-handling ops under the "input" name scope
    with tf.name_scope("input"):
        x = tf.placeholder(tf.float32,shape=[None,mnsit_inference1.INPUT_NODE],name="x_input")
        y_ = tf.placeholder(tf.float32,shape=[None,mnsit_inference1.OUTPUT_NODE],name="y_input")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    # Reuse the forward-propagation process defined in mnsit_inference1
    y = mnsit_inference1.inference(x,regularizer)
    global_step = tf.Variable(0,trainable=False)
    # Put the moving-average related ops under the "moving_average" name scope
    with tf.name_scope("moving_average"):
        variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
        variable_average_op = variable_average.apply(tf.trainable_variables())
    # Put the loss-related ops under the "loss_func" name scope
    with tf.name_scope("loss_func"):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_,1),logits=y)
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))
    # Define the learning rate and the optimizer under the "train_step" name scope
    with tf.name_scope("train_step"):
        learning_rate = tf.train.exponential_decay(LR_BASE,global_step,mnsit.train.num_examples/BATCH_SIZE,LR_DECAY)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
        with tf.control_dependencies([train_step,variable_average_op]):
            train_op = tf.no_op("train")
    # Create the saver used to persist the model
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRANING_STEPS):
            xs,ys = mnsit.train.next_batch(BATCH_SIZE)
            _,loss_value,step = sess.run([train_op,loss,global_step],feed_dict={x:xs,y_:ys})
            # Print the loss and save the model every 1000 steps
            if i % 1000 == 0:
                print("After {0} training steps,loss on training batch is {1}".format(step,loss_value))
                saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_SAVE_NAME),global_step=global_step)
    writer = tf.summary.FileWriter("path/to/log",tf.get_default_graph())
    writer.close()
def main(argv = None):
    mnsit = input_data.read_data_sets("mnist_set",one_hot=True)
    train(mnsit)
if __name__ == '__main__':
    tf.app.run()

The resulting TensorBoard graph visualization:

 

     Besides manually organizing nodes with TensorFlow name scopes, TensorBoard also adjusts the layout of the graph automatically. It splits the graph into a main graph and an auxiliary area: the Graph panel on the left is the main graph, and the Auxiliary Nodes panel on the right contains the auxiliary nodes. Nodes with a large number of connections are automatically moved into the auxiliary area.

    Besides this automatic behavior, TensorBoard also lets you adjust the layout manually, for example by moving a node between the main graph and the auxiliary area from its context menu in the graph view.

 

    2) Node information

  Besides showing the structure of the computation graph, TensorBoard can also show basic information about every node on the graph, including the time and memory it consumes at run time.

  The training loop in the code above can be adjusted to record, every 1000 steps, the run time and memory consumed by each node on the graph.

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        writer = tf.summary.FileWriter("path/to/log", tf.get_default_graph())
        for i in range(TRANING_STEPS):
            xs, ys = mnsit.train.next_batch(BATCH_SIZE)
            # Record run-time statistics every 1000 steps
            if i % 1000 == 0:
                # Configure what should be traced during this run
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                # Pass the trace options in and collect the run-time metadata
                _, loss_value, step = sess.run([train_op, loss, global_step],
                                               feed_dict={x: xs, y_: ys},
                                               options=run_options, run_metadata=run_metadata)
                # Write the per-node run-time information (time and memory) to the log
                writer.add_run_metadata(run_metadata, "step-%s" % i)
                print("After {0} training steps, loss on training batch is {1}".format(step, loss_value))
            else:
                _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
    writer.close()
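
After restarting TensorBoard on this log directory, the GRAPHS tab offers a selector for the recorded session runs (step-0, step-1000, ...), and the nodes of the graph can be colored by compute time or memory so that the most expensive nodes are easy to spot.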

 

    3) Visualizing monitoring metrics

  Besides visualizing the TensorFlow computation graph, TensorBoard can also visualize various monitoring metrics that help in understanding the state of a running program.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
SUMMARY_DIR = "path/to/log"
BATCH_SIZE =100
TRAIN_STEPS =30000

# Record a histogram, the mean and the standard deviation of a tensor
def variable_summaries(var, name):
    with tf.name_scope("summaries"):
        tf.summary.histogram(name, var)
        mean = tf.reduce_mean(var)
        tf.summary.scalar("mean/" + name, mean)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar("stddev/" + name, stddev)

# Build one fully connected layer of the network
def nn_layer(input_tensor,input_dim,output_dim,layer_name,act= tf.nn.relu):
    # Put all ops of the same layer under one name scope
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            weights = tf.Variable(tf.truncated_normal([input_dim,output_dim],stddev=0.1))
            variable_summaries(weights,layer_name+'/weights')
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.constant(0.0,shape=[output_dim]))
            variable_summaries(biases,layer_name+'/biases')
        with tf.name_scope("Wx_plus_b"):
            preactivate = tf.matmul(input_tensor,weights)+biases
            tf.summary.histogram(layer_name+'/pre_activations',preactivate)
            activations = act(preactivate)
            tf.summary.histogram(layer_name+"/activations",activations)
            return activations
def main(_):
    mnsit = input_data.read_data_sets('mnist_set',one_hot=True)
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32,shape=[None,784],name='x_input')
        y_ = tf.placeholder(tf.float32,shape=[None,10],name='y_input')
    with tf.name_scope('input_reshape'):
        # Reshape the flattened inputs back into 28x28 images and log up to 10 of them
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.summary.image('input', image_shaped_input, 10)
    hidden1 = nn_layer(x,784,500,'layer1')
    y = nn_layer(hidden1,500,10,'layer2')
    with tf.name_scope('cross_entropy'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y))
        tf.summary.scalar('cross_entropy',cross_entropy)
    with tf.name_scope('train'):
        train_op = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
            tf.summary.scalar('accuracy',accuracy)
    # Merge all the summary ops defined above into a single op
    merged = tf.summary.merge_all()

    with tf.Session() as sess :
        summary_writer = tf.summary.FileWriter(SUMMARY_DIR,sess.graph)
        tf.global_variables_initializer().run()
        for i in range(TRAIN_STEPS):
            xs,ys = mnsit.train.next_batch(BATCH_SIZE)
            # Run the merged summary op together with the training step and write the result
            summary, _ = sess.run([merged, train_op], feed_dict={x: xs, y_: ys})
            summary_writer.add_summary(summary, i)
    summary_writer.close()

if __name__ == '__main__':
    tf.app.run()
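
Once this script has produced logs, starting TensorBoard on SUMMARY_DIR shows each kind of summary in its own tab: the tf.summary.scalar values (cross entropy, accuracy and the per-layer mean/stddev) under SCALARS, the tf.summary.image samples under IMAGES, and the tf.summary.histogram data under HISTOGRAMS and DISTRIBUTIONS.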
