Implementing logistic regression (LR) with TensorFlow


import tensorflow as tf
import numpy as np
tf.reset_default_graph()  # clear the default graph

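The code below uses the TensorFlow 1.x graph/session API. If you only have TensorFlow 2.x installed, the same script can still be run through the v1 compatibility layer; a minimal sketch:

import tensorflow.compat.v1 as tf  # 1.x-style API under TF 2.x
tf.disable_eager_execution()       # placeholders and sessions require graph mode
tf.reset_default_graph()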

FEATURE_NUM = 8  # number of features
with tf.name_scope("input"):
    # x: feature matrix, one row per sample; y: 0/1 class labels
    x = tf.placeholder(tf.float32, shape=[None, FEATURE_NUM])
    y = tf.placeholder(tf.int32, shape=[None])

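For illustration, a feed for these placeholders could be built with numpy as follows (the batch size of 4 is an arbitrary choice for the example):

batch_x = np.random.rand(4, FEATURE_NUM)   # float features, shape (4, 8)
batch_y = np.random.randint(2, size=4)     # integer 0/1 labels, shape (4,)
# later: sess.run(..., feed_dict={x: batch_x, y: batch_y})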


with tf.name_scope("lr"):
weight_init = tf.truncated_normal(shape=[FEATURE_NUM, 1], mean=0.0, stddev=1.0)
weight = tf.Variable(weight_init)
bais = tf.Variable([0.0])
y_expand = tf.reshape(y,shape=[-1,1])
hypothesis = tf.sigmoid(tf.matmul(x, weight) + bais)

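Written out, the lr block computes the standard logistic-regression hypothesis, where W is the [FEATURE_NUM, 1] weight column and b the scalar bias:

h(x) = sigmoid(xW + b) = 1 / (1 + exp(-(xW + b)))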




with tf.name_scope("loss"):
y_float = tf.to_float(y_expand)
likelyhood = -(y_float tf.log(hypothesis) + (1.0 - y_float) (tf.log(1.0 - hypothesis)))
loss = tf.reduce_mean(likelyhood, axis=0)

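The loss above is the binary cross-entropy written out by hand; note that tf.log can blow up to -inf when hypothesis saturates at exactly 0 or 1. A sketch of a numerically safer equivalent using TensorFlow's built-in op, which takes the pre-sigmoid logits rather than hypothesis:

with tf.name_scope("loss_stable"):
    logits = tf.matmul(x, weight) + bias  # pre-sigmoid scores
    loss_stable = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_float, logits=logits))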


LEARNING_RATE = 0.02  # learning rate
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    training_op = optimizer.minimize(loss)

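Plain gradient descent is enough here, and swapping in another first-order optimizer is a one-line change. For example, a sketch with Adam (reusing the same learning-rate value):

with tf.name_scope("train_adam"):
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
    training_op_adam = optimizer.minimize(loss)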


THRESHOLD = 0.5  # decision threshold
with tf.name_scope("eval"):
    # sign(hypothesis - 0.5) maps predictions to -1/+1 and sign(y_float - 0.5)
    # maps the 0/1 labels to -1/+1, so equality gives per-sample correctness
    predictions = tf.sign(hypothesis - THRESHOLD)
    labels = tf.sign(y_float - THRESHOLD)
    corrections = tf.equal(predictions, labels)
    accuracy = tf.reduce_mean(tf.cast(corrections, tf.float32))

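The sign trick works because both sides end up in {-1, +1}; an equivalent and perhaps more direct way is to threshold the probabilities and compare integer labels. A sketch:

with tf.name_scope("eval_alt"):
    pred_int = tf.cast(hypothesis > THRESHOLD, tf.int32)        # 0/1 predictions
    correct = tf.equal(pred_int, tf.reshape(y, shape=[-1, 1]))  # compare with 0/1 labels
    accuracy_alt = tf.reduce_mean(tf.cast(correct, tf.float32))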




init = tf.global_variables_initializer()  # initializer for all variables

EPOCH = 10  # number of training epochs
with tf.Session() as sess:
    sess.run(init)
    for i in range(EPOCH):
        # train on a random batch of 10 samples, then evaluate on a random batch of 5
        _training_op, _loss = sess.run([training_op, loss],
                                       feed_dict={x: np.random.rand(10, 8), y: np.random.randint(2, size=10)})
        _accuracy = sess.run([accuracy], feed_dict={x: np.random.rand(5, 8), y: np.random.randint(2, size=5)})
        print("epoch:", i, _loss, _accuracy)

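Because every sess.run above feeds freshly generated random data, the printed loss is not comparable across epochs and the random labels are unlearnable noise. A sketch of the same loop on one fixed synthetic dataset (the label rule below is an arbitrary assumption, chosen only so the model has something to learn):

np.random.seed(0)
train_x = np.random.rand(100, FEATURE_NUM)
train_y = (train_x.sum(axis=1) > FEATURE_NUM / 2).astype(np.int32)  # hypothetical label rule

with tf.Session() as sess:
    sess.run(init)
    for i in range(EPOCH):
        _, _loss = sess.run([training_op, loss], feed_dict={x: train_x, y: train_y})
        _accuracy = sess.run(accuracy, feed_dict={x: train_x, y: train_y})
        print("epoch:", i, _loss, _accuracy)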






Reference article
https://www.cnblogs.com/jhc888007/p/10390282.html