```python
# Lab 5 Logistic Regression Classifier
import tensorflow as tf

tf.set_random_seed(777)  # for reproducibility

x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]

# placeholders for a tensor that will always be fed
X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([2, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

# Logistic regression passes the linear regression value through the sigmoid function.
# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(-tf.matmul(X, W)))
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

# cost/loss function: binary cross-entropy.
# The GradientDescentOptimizer below minimizes it by differentiating this cost.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) +
                       (1 - Y) * tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# Accuracy computation
# True if hypothesis > 0.5 else False
# tf.cast converts the True/False result to 1./0.
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

# Launch graph
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, cost_val)

    # Accuracy report
    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)

'''
0 1.73078
200 0.571512
400 0.507414
600 0.471824
800 0.447585
...
9200 0.159066
9400 0.15656
9600 0.154132
9800 0.151778
10000 0.149496

Hypothesis:  [[ 0.03074029]
 [ 0.15884677]
 [ 0.30486736]
 [ 0.78138196]
 [ 0.93957496]
 [ 0.98016882]]
Correct (Y):  [[ 0.]
 [ 0.]
 [ 0.]
 [ 1.]
 [ 1.]
 [ 1.]]
Accuracy:  1.0
'''
```

Running the same model on the diabetes dataset:

```python
# Lab 5 Logistic Regression Classifier
import tensorflow as tf
import numpy as np

tf.set_random_seed(777)  # for reproducibility

xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

print(x_data.shape, y_data.shape)

# placeholders for a tensor that will always be fed
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([8, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(-tf.matmul(X, W)))
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

# cost/loss function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) +
                       (1 - Y) * tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# Accuracy computation
# True if hypothesis > 0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

# Launch graph
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, cost_val)

    # Accuracy report
    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)

'''
0 0.82794
200 0.755181
400 0.726355
600 0.705179
800 0.686631
...
9600 0.492056
9800 0.491396
10000 0.490767

...
 [ 1.]
 [ 1.]
 [ 1.]]
Accuracy:  0.762846
'''
```
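To make the hypothesis, cost, and gradient concrete, here is a minimal NumPy sketch of the same math on the toy data from the first program. The weight and bias values below are made-up placeholders for illustration, not the values the TF session learns, and the gradient formula is the standard derivative of the cross-entropy cost with a sigmoid hypothesis.

```python
import numpy as np

def sigmoid(z):
    # 1 / (1 + e^(-z)), the same function as tf.sigmoid
    return 1. / (1. + np.exp(-z))

x = np.array([[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]], dtype=np.float32)
y = np.array([[0], [0], [0], [1], [1], [1]], dtype=np.float32)

W = np.array([[1.0], [1.0]], dtype=np.float32)  # hypothetical weights, for illustration only
b = np.float32(-5.0)                            # hypothetical bias

h = sigmoid(x @ W + b)  # hypothesis, shape (6, 1)

# binary cross-entropy, same formula as the cost node in the graph
cost = -np.mean(y * np.log(h) + (1 - y) * np.log(1 - h))

# accuracy: threshold at 0.5, compare with labels
acc = np.mean(((h > 0.5).astype(np.float32) == y).astype(np.float32))

# One manual gradient-descent step: d(cost)/dW = X^T (h - y) / m.
# This derivative is what GradientDescentOptimizer computes for this cost.
m = len(x)
grad_W = x.T @ (h - y) / m
grad_b = np.mean(h - y)
W -= 0.01 * grad_W
b -= 0.01 * grad_b

print("cost:", cost, "accuracy:", acc)
```

Repeating that update step many times is, in essence, what the 10,001 `sess.run([cost, train], ...)` iterations above do; TensorFlow just derives the gradient automatically from the cost graph.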