Training OTSU with TensorFlow

Training a simple regression network

The basic helper functions are as follows:

# coding=utf-8
import tensorflow as tf
import numpy as np
np.random.seed(0)

# Convolution weight initialization
def weight(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='W')

# Bias initialization
def bias(shape):
    return tf.Variable(tf.constant(0.1, shape=shape), name='b')

# Fully-connected weight initialization (He-style scaling by sqrt(node_in / 2))
def fc_weight(node_in, node_out):
    return tf.Variable(np.random.randn(node_in, node_out), name='W',
                       dtype='float32') / np.sqrt(node_in / 2.).astype(np.float32)

Input, network, and loss function:

with tf.name_scope('input'):
    features = tf.placeholder('float32', [None, 7, 7], name='feature')
    images = tf.reshape(features, [-1, 7, 7, 1])
with tf.name_scope('flat'):
    flat = tf.reshape(images, [-1, 49])
with tf.name_scope('hidden'):
    w = fc_weight(49,49)
    b = bias([49])
    hidden1 = tf.nn.relu(tf.matmul(flat, w) + b)
    w2 = fc_weight(49,10)
    b2 = bias([10])
    hidden2 = tf.nn.relu(tf.matmul(hidden1, w2) + b2)
with tf.name_scope('output'):
    w3 = fc_weight(10,3)
    b3 = bias([3])
    out = tf.matmul(hidden2, w3) + b3
with tf.name_scope('optimizer'):
    loss_function = tf.reduce_mean(tf.square(out - [[1./7,1./7,1./7]]))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_function)
    # Equivalent to building the training step manually with a separate optimizer object:
    # opt = tf.train.AdamOptimizer(learning_rate=0.1)
    # var_list = tf.trainable_variables()
    # for v in var_list:
    #     print(v.name)
    # clone_grad = opt.compute_gradients(loss_function, var_list=var_list)
    # grad_updates = opt.apply_gradients(clone_grad)

Dataset generation and training:

# Generate the dataset
X_feature = []
for i in range(7):
    for j in range(7):
        for t in range(30):
            deta_i = np.random.randint(3,5)
            deta_j = np.random.randint(3, 5)
            map_feature = np.random.rand(7,7)
            for di in range(deta_i):
                for dj in range(deta_j):
                    ni = i+di; nj =j+dj
                    if ni >=7 or nj >=7:
                        continue
                    map_feature[ni,nj] = np.random.rand()*2+1
            map_feature = (map_feature/6. - 0.5)*2.0
            X_feature.append(map_feature)
X_feature = np.array(X_feature, dtype=np.float32)
np.random.shuffle(X_feature)
print(X_feature.shape)
# train
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    trainEpoch = 30
    batchSize = 30
    totalBatchs = int(X_feature.shape[0] / batchSize)
    print(X_feature[:1])
    for epoch in range(trainEpoch):
        for i in range(totalBatchs):
            batch = X_feature[i * batchSize:(i + 1) * batchSize]
            rr, ll, tt = sess.run([optimizer, loss_function, out],
                                  feed_dict={features: X_feature[:1]})  # repeatedly iterate on the first sample only
            print(ll, tt)
        y = sess.run(out, feed_dict={features: X_feature[:1]})
        print(y)

Implementing OTSU with FC layers

For the details of OTSU, see the standard references; here we reproduce its effect with a network.
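As a brief refresher (a minimal standalone NumPy sketch, not part of the original code), classical OTSU picks the threshold that maximizes the between-class variance w0*w1*(u0-u1)^2; this is the same quantity the direct-optimization loss later in the post is built around:

import numpy as np

def otsu_threshold(img, nbins=256):
    # histogram of the image and normalized bin probabilities
    hist, edges = np.histogram(img.ravel(), bins=nbins)
    p = hist.astype(np.float64) / hist.sum()
    centers = (edges[:-1] + edges[1:]) / 2.
    best_t, best_var = edges[0], -1.
    for k in range(1, nbins):
        w0 = p[:k].sum()                        # weight of the "background" class
        w1 = 1. - w0                            # weight of the "foreground" class
        if w0 == 0. or w1 == 0.:
            continue
        u0 = (p[:k] * centers[:k]).sum() / w0   # background mean
        u1 = (p[k:] * centers[k:]).sum() / w1   # foreground mean
        var_between = w0 * w1 * (u0 - u1) ** 2  # between-class variance
        if var_between > best_var:
            best_var, best_t = var_between, edges[k]
    return best_t
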
Network structure:

with tf.name_scope('input'):
    features = tf.placeholder('float32', [None, 7, 7], name='feature')
    images = tf.reshape(features, [-1, 7, 7, 1])
    labels = tf.placeholder('float32',[None,3], name='label')
with tf.name_scope('flat'):
    flat = tf.reshape(images, [-1, 49])

with tf.name_scope('hidden'):
    w = fc_weight(49,49)
    b = bias([49])
    hidden1 = tf.nn.relu(tf.matmul(flat, w) + b)
    w2 = fc_weight(49,10)
    b2 = bias([10])
    hidden2 = tf.nn.relu(tf.matmul(hidden1, w2) + b2)
with tf.name_scope('output'):
    w3 = fc_weight(10,3)
    b3 = bias([3])
    out = tf.matmul(hidden2, w3) + b3

with tf.name_scope('optimizer'):
    loss_function = tf.reduce_mean(tf.square(out - labels))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss_function)

Generate the sample set:

dataset = []
for i in range(5):
    for j in range(5):
        for t in range(40):
            deta = np.random.randint(3, 5)
            if i+deta >=7 or j+deta>=7:
                continue
            map_feature = np.random.rand(7, 7)
            for di in range(deta):
                for dj in range(deta):
                    ni = i + di
                    nj = j + dj
                    map_feature[ni, nj] = np.random.rand() * 2 + 2
            # label: box center and side length
            center_x = i + deta / 2.
            center_y = j + deta / 2.
            length = deta
            map_feature = (map_feature / 4. - 0.5) * 2.0
            map_label = np.array([center_x, center_y, length])
            sample = [map_feature, map_label]
            dataset.append(sample)
#
np.random.shuffle(dataset)
allbatch = [ele[0] for ele in dataset]
alllabel = [ele[1] for ele in dataset]

Training:

with tf.Session() as sess:
    writer = tf.summary.FileWriter("./logs/", sess.graph)
    sess.run(tf.global_variables_initializer())
    trainEpoch = 3000
    batchSize = 30
    totalBatchs = int(len(dataset) / batchSize)
    for epoch in range(trainEpoch):
        for i in range(totalBatchs):
            batch = allbatch[i * batchSize:(i + 1) * batchSize]
            label = alllabel[i * batchSize:(i + 1) * batchSize]
            _, = sess.run([optimizer], feed_dict={features: batch, labels: label})
        loss = sess.run([loss_function, out], feed_dict={features: allbatch[:3], labels: alllabel[:3]})
        print(loss)
        print(alllabel[:3])

Optimizing OTSU directly with the network

Network:

# Soft step function: a steep sigmoid used as a differentiable 0/1 indicator
def h(x):
    return tf.sigmoid(5*x)
with tf.name_scope('input'):
    features = tf.placeholder('float32', [None, 7, 7], name='feature')
    images = tf.reshape(features, [-1, 7, 7, 1])
with tf.name_scope('flat'):
    flat = tf.reshape(images, [-1, 49])
with tf.name_scope('hidden'):
    w = fc_weight(49,49)
    b = bias([49])
    hidden1 = tf.nn.relu(tf.matmul(flat, w) + b)
    w2 = fc_weight(49,10)
    b2 = bias([10])
    hidden2 = tf.nn.relu(tf.matmul(hidden1, w2) + b2)
with tf.name_scope('output'):
    w3 = fc_weight(10,3)
    b3 = bias([3])
    out = tf.matmul(hidden2, w3) + b3

with tf.name_scope('integral'):
    size = 48
    big_images = tf.image.resize_bilinear(images, [size, size])

with tf.name_scope('optimizer'):
    out_limit = tf.minimum(tf.maximum(out, 0.), 1.)
    cx = out_limit[:, 0]
    cy = out_limit[:, 1]
    ll = out_limit[:, 2]
    
    x1 = tf.maximum(cx - ll, 0.) * (size-1)
    y1 = tf.maximum(cy - ll, 0.) * (size-1)
    x2 = tf.minimum(cx + ll, 1.) * (size-1)
    y2 = tf.minimum(cy + ll, 1.) * (size-1)

    rowlist = []
    for i in range(size):
        rowlist.append(np.ones(size)*i)
    rows = np.concatenate(rowlist).astype(np.float32)
    cols = np.tile(np.arange(0, size, dtype=np.float32),[size])
    elems = (rows, cols)
    # Implement the crop operation through a mapped function
    def mf(ele):
        x=ele[0]
        y=ele[1]
        return (h(x-x1) - h(x-x2)) * (h(y-y1) - h(y-y2))

    omap = tf.map_fn(mf, elems, dtype='float32')
    pmap = tf.reshape(omap,[1,size,size,-1])
    tmap = tf.transpose(pmap, perm=[3,2,1,0]) #b * size * size * 1
    roidot = tmap*big_images
    roi = tf.reduce_sum(roidot, axis=[1,2,3])
    total = tf.reduce_sum(big_images, axis=[1,2,3])
    areanum = tf.reduce_sum(tmap, axis=[1,2,3]) + 0.1
    w0 = areanum / size / size
    w1 = 1. - w0
    u0 = roi / areanum
    u1 = (total - roi) / (size*size - areanum)
    #
    penalty = tf.maximum((0.1 - w0)*100., 0.)
    loss_func = tf.reduce_mean(1 - tf.sign(u0-u1)*w0*w1*(u0-u1)*(u0-u1))
    loss_penalty = tf.reduce_mean(penalty)
    loss_function = loss_func + loss_penalty
    #
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss_function)
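
To make the box boundaries differentiable, the graph above builds an approximate rectangle indicator out of steep sigmoids: h(x - x1) - h(x - x2) is close to 1 for x between x1 and x2 and close to 0 outside, and the product of the row and column terms gives a soft rectangular mask. A minimal standalone NumPy sketch of that idea (an assumed illustration, not the TensorFlow graph itself):

import numpy as np

def h_np(x):
    # steep sigmoid as a smooth 0/1 step, mirroring h(x) in the graph above
    return 1. / (1. + np.exp(-5. * x))

size = 48
rows = np.arange(size, dtype=np.float32)[:, None]   # row coordinates, shape (size, 1)
cols = np.arange(size, dtype=np.float32)[None, :]   # column coordinates, shape (1, size)

# an example box in pixel coordinates (hypothetical values)
x1, y1, x2, y2 = 10., 15., 30., 40.

# soft indicator: ~1 inside [x1, x2] x [y1, y2], ~0 outside, smooth at the edges
mask = (h_np(rows - x1) - h_np(rows - x2)) * (h_np(cols - y1) - h_np(cols - y2))
print(mask.shape, mask.min(), mask.max())           # (48, 48), ~0, ~1

The graph uses its soft mask the same way: summing mask * image gives the foreground intensity (roi), summing the mask gives the soft area (areanum), and w0, w1, u0, u1 follow from those sums.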

Data and training:

X_feature = []
for i in range(5):
    for j in range(5):
        for t in range(30):
            deta_i = np.random.randint(3, 5)
            deta_j = np.random.randint(3, 5)
            map_feature = np.random.rand(7, 7)
            for di in range(deta_i):
                for dj in range(deta_j):
                    ni = i + di
                    nj = j + dj
                    if ni >= 7 or nj >= 7:
                        continue
                    map_feature[ni, nj] = np.random.rand() * 2 + 2
            map_feature = (map_feature / 4. - 0.5) * 2.0
            X_feature.append(map_feature)
X_feature = np.array(X_feature, dtype=np.float32)
np.random.shuffle(X_feature)
print(X_feature.shape)
# train
with tf.Session() as sess:
    writer = tf.summary.FileWriter("./logs/", sess.graph)
    sess.run(tf.global_variables_initializer())
    trainEpoch = 30
    batchSize = 30
    totalBatchs = int(X_feature.shape[0] / batchSize)
    print(X_feature[:1])
    for epoch in range(trainEpoch):
        for i in range(totalBatchs):
            batch = X_feature[i * batchSize:(i + 1) * batchSize]
            _, ll, tt = sess.run([optimizer, out_limit, areanum], feed_dict={features: batch})
        y = sess.run([x1, y1, x2, y2, loss_function], feed_dict={features: X_feature[:1]})
        print(y)

Over the course of training we observed a few issues:

  1. Do not use a hand-written sigmoid implementation; backpropagation through it can go out of numerical range (see the sketch after this list).
  2. The final output layer should not have a ReLU.
  3. Some operations have a zero or undefined derivative (backpropagation fails), raising "No gradient defined for operation 'xxx'".
  4. The learning rate matters a lot. In this example, 0.005 converges quickly; 0.05 quickly gets stuck in a local minimum; with 0.001, because of Adam's adaptive updates, optimization becomes slower and slower and in the end barely converges to the optimum.
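
The first point can be seen in a small standalone sketch (an assumed snippet, not from the original training code): a hand-written sigmoid overflows in exp() for large |x| and its autodiff gradient turns into NaN, while the built-in tf.sigmoid uses the stable analytic gradient y * (1 - y):

import tensorflow as tf

x = tf.constant([-100., 0., 100.])
naive = 1. / (1. + tf.exp(-x))      # hand-written sigmoid
stable = tf.sigmoid(x)              # built-in, numerically stable
g_naive = tf.gradients(naive, x)[0]
g_stable = tf.gradients(stable, x)[0]

with tf.Session() as sess:
    print(sess.run([naive, g_naive]))    # gradient contains NaN at x = -100
    print(sess.run([stable, g_stable]))  # gradient stays finite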