Basic model (softmax classification)

# Lab 7 Learning rate and Evaluation
import tensorflow as tf
import matplotlib.pyplot as plt
import random

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for the single softmax layer
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))

# parameters
learning_rate = 0.001
batch_size = 100
num_epochs = 50
num_iterations = int(mnist.train.num_examples / batch_size)

hypothesis = tf.matmul(X, W) + b

# define cost/loss & optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=hypothesis, labels=tf.stop_gradient(Y)
    )
)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_prediction = tf.equal(tf.argmax(hypothesis, axis=1), tf.argmax(Y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# train my model
with tf.Session() as sess:
    # initialize
    sess.run(tf.global_variables_initializer())

    for epoch in range(num_epochs):
        avg_cost = 0

        for iteration in range(num_iterations):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, cost_val = sess.run(
                [train, cost], feed_dict={X: batch_xs, Y: batch_ys}
            )
            avg_cost += cost_val / num_iterations

        print(f"Epoch: {(epoch + 1):04d}, Cost: {avg_cost:.9f}")

    print("Learning Finished!")

    # Test model and check accuracy
    print(
        "Accuracy:",
        sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}),
    )

    # Get one and predict
    r = random.randint(0, mnist.test.num_examples - 1)
    print("Label: ", sess.run(tf.argmax(mnist.test.labels[r : r + 1], axis=1)))
    print(
        "Prediction: ",
        sess.run(
            tf.argmax(hypothesis, axis=1), feed_dict={X: mnist.test.images[r : r + 1]}
        ),
    )

    plt.imshow(
        mnist.test.images[r : r + 1].reshape(28, 28),
        cmap="Greys",
        interpolation="nearest",
    )
    plt.show()

'''
Epoch: 0001 Cost: 5.745170949
Epoch: 0002 Cost: 1.780056722
Epoch: 0003 Cost: 1.122778654
...
Epoch: 0048 Cost: 0.271918680
Epoch: 0049 Cost: 0.270640434
Epoch: 0050 Cost: 0.269054370
Learning Finished!
Accuracy: 0.9194
'''
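Since this lab is nominally about the learning rate, it helps to see what actually happens when learning_rate is varied. Below is a minimal sketch, not part of the original lab: the rate is fed through a placeholder so the same graph can be retrained at several values in one run. The toy random data and the three example rates are my own assumptions, standing in for real MNIST batches.

import numpy as np
import tensorflow as tf

tf.set_random_seed(777)
np.random.seed(777)

# Toy stand-in for MNIST batches (assumption: random data is enough
# to show divergence vs. convergence behaviour).
x_data = np.random.rand(100, 784).astype(np.float32)
y_data = np.eye(10)[np.random.randint(0, 10, size=100)].astype(np.float32)

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
lr = tf.placeholder(tf.float32)  # learning rate supplied at run time

W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))
logits = tf.matmul(X, W) + b
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=tf.stop_gradient(Y)))
train = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)

# Too large a rate oscillates or diverges (cost may become nan);
# too small a rate barely moves in the same number of steps.
for rate in [10.0, 0.001, 1e-7]:
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(200):
            _, c = sess.run([train, cost],
                            feed_dict={X: x_data, Y: y_data, lr: rate})
        print(f"learning_rate={rate}: cost after 200 steps = {c}")

Adam tolerates a fairly wide band of rates, which is part of why 0.001 works well in the script above; with plain gradient descent this knob is considerably more sensitive.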
NN for MNIST

# Lab 10 MNIST and NN
import tensorflow as tf
import random
# import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3

# define cost/loss & optimizer
# (softmax_cross_entropy_with_logits is deprecated; use the _v2 form
# with stop_gradient on the labels, as in the softmax model above)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=hypothesis, labels=tf.stop_gradient(Y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one test example at random and predict it
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

# plt.imshow(mnist.test.images[r:r + 1].
#            reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 141.207671860
Epoch: 0002 cost = 38.788445864
Epoch: 0003 cost = 23.977515479
Epoch: 0004 cost = 16.315132428
Epoch: 0005 cost = 11.702554882
Epoch: 0006 cost = 8.573139748
Epoch: 0007 cost = 6.370995680
Epoch: 0008 cost = 4.537178684
Epoch: 0009 cost = 3.216900532
Epoch: 0010 cost = 2.329708954
Epoch: 0011 cost = 1.715552875
Epoch: 0012 cost = 1.189857912
Epoch: 0013 cost = 0.820965160
Epoch: 0014 cost = 0.624131458
Epoch: 0015 cost = 0.454633765
Learning Finished!
Accuracy: 0.9455
'''
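The huge first-epoch cost above (~141) is mostly an artifact of initializing every weight with tf.random_normal, which makes the initial logits large. The usual next step is Xavier (Glorot) initialization. Below is a hedged sketch of that change, assuming a TF 1.x install where tf.contrib.layers.xavier_initializer is still available; only the graph construction is shown, since the cost, training loop, and evaluation are identical to the script above.

import tensorflow as tf

tf.set_random_seed(777)

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# Xavier scales each weight matrix by its fan-in/fan-out, so the
# initial logits -- and hence the initial cost -- start small.
xavier = tf.contrib.layers.xavier_initializer()

W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier)
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier)
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.get_variable("W3", shape=[256, 10], initializer=xavier)
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3

# cost, optimizer, training loop, and accuracy check: same as above

With this initialization the first-epoch cost typically starts well under 1 instead of ~141, and test accuracy usually lands a couple of points above the 0.9455 reported here.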