Code

import tensorflow as tf
tf.set_random_seed(777)  # for reproducibility

# prepare the data
x_train = [1, 2, 3]
y_train = [1, 2, 3]

# assign random initial values to the parameters
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")

# our hypothesis, XW + b
# we assume the data is linear; this node is the prediction
hypothesis = x_train * W + b
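
One detail worth noting: x_train is a plain length-3 Python list while W has shape [1], so the multiplication broadcasts and hypothesis ends up as a rank-1 tensor of shape (3,). A quick hypothetical check (the exact node name may differ):

print(hypothesis)
# Tensor("add:0", shape=(3,), dtype=float32)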

Cost function์œผ๋กœ MSE๋ฅผ ์‚ฌ์šฉํ•  ๊ฒƒ์ด๋ฏ€๋กœ,

# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - y_train))
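
To make the formula concrete, here is a small NumPy sketch (an illustration, not part of the original code) of exactly what reduce_mean(square(...)) computes, using hypothetical values W = 1.5, b = -1.0:

import numpy as np

pred = np.array([1, 2, 3]) * 1.5 + (-1.0)         # [0.5, 2.0, 3.5]
mse = np.mean((pred - np.array([1, 2, 3])) ** 2)  # mean of [0.25, 0.0, 0.25]
print(mse)  # 0.1666...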

์ด์ œ, ๋ฐ์ดํ„ฐ๊ฐ€ ์ฃผ์–ด์กŒ๊ณ , ์–ด๋–ค Cost function์„ ์ตœ์ ํ™”ํ• ์ง€๋„ ์ •ํ•ด์กŒ์œผ๋‹ˆ,

์–ด๋–ค ์•Œ๊ณ ๋ฆฌ์ฆ˜์œผ๋กœ ์ตœ์ ํ™”ํ• ์ง€ ๊ฒฐ์ •ํ•ด์•ผ ํ•œ๋‹ค.

๊ธฐ๋ณธ์ ์œผ๋กœ ์šฐ๋ฆฌ๋Š” Gradient Descent๋ฅผ ์‚ฌ์šฉํ•˜๋„๋ก ํ•˜๊ฒ ๋‹ค.

# optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
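
For intuition, minimize(cost) builds the same update you could write by hand: W := W - α·∂cost/∂W (and likewise for b). A minimal sketch of that manual equivalent using tf.gradients (an illustration only; the original code lets the optimizer do this):

# manual equivalent of optimizer.minimize(cost)
grad_W, grad_b = tf.gradients(cost, [W, b])  # symbolic dcost/dW, dcost/db
manual_step = tf.group(
    W.assign(W - 0.01 * grad_W),
    b.assign(b - 0.01 * grad_b),
)
# sess.run(manual_step) performs one gradient-descent step, like sess.run(train)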

์—ฌ๊ธฐ๊นŒ์ง€ Tensorflow๋ฅผ ์‚ฌ์šฉํ•จ์— ์žˆ์–ด์„œ Computation Graph๋ฅผ ๋‹ค ๊ทธ๋ ธ๋‹ค๊ณ  ์ƒ๊ฐํ•˜๋ฉด ๋œ๋‹ค.

์ด์ œ ์‹คํ–‰ํ•ด์•ผ ํ•˜๋ฏ€๋กœ, session์„ ์ˆ˜ํ–‰ํ•˜์ž.

sess = tf.Session()

๊ทธ๋Ÿฐ๋ฐ ์šฐ๋ฆฌ๊ฐ€ ๋ณ€์ˆ˜๋กœ ์ง€์ •ํ•ด ๋†“์€ W, b๋ฅผ ์‚ฌ์šฉํ•˜๊ธฐ ์ „์—๋Š” ํ•ญ์ƒ ์ดˆ๊ธฐํ™” ๋ฅผ ํ•ด์ฃผ์–ด์•ผ ํ•œ๋‹ค.

sess.run(tf.global_variables_initializer())
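
If you forget this step and evaluate a variable first, TensorFlow 1.x raises an error along these lines:

# sess.run(W)  # running this before the initializer fails:
# FailedPreconditionError: Attempting to use uninitialized value weight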

์ด์ œ ์‹คํ–‰์‹œํ‚ฌ ์ค€๋น„๊ฐ€ ๋˜์—ˆ๊ณ , ์šฐ๋ฆฌ๋Š” ์ด 2000๋ฒˆ training์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์‹ถ๋‹ค.

์ด๋•Œ ์‹คํ–‰์‹œํ‚ค๊ณ  ์‹ถ์€ ๋…ธ๋“œ๋Š” train ์ด๋‹ค.

for step in range(2001):
    sess.run(train)

๊ทธ๋Ÿฐ๋ฐ ์ด๋Ÿด ๊ฒฝ์šฐ ์–ด๋–ป๊ฒŒ ์ž˜ fitting์ด ๋˜์—ˆ๋Š”์ง€ ์•Œ ์ˆ˜ ์—†์œผ๋ฏ€๋กœ, ๋‹ค๋ฅธ ๊ฐ’์„์„ printํ•ด๋ณด์ž.

์šฐ๋ฆฌ๊ฐ€ ์กฐ์‚ฌํ•˜๊ณ  ์‹ถ์€ ๊ฐ’์€,

  1. step์— ๋”ฐ๋ผ์„œ
  2. cost๊ฐ€ ์–ด๋–ป๊ฒŒ ๋ณ€ํ™”ํ–ˆ๋Š”์ง€
  3. W๊ฐ€ ์–ด๋–ป๊ฒŒ ๋ณ€ํ™”ํ–ˆ๋Š”์ง€
  4. b๊ฐ€ ์–ด๋–ป๊ฒŒ ๋ณ€ํ™”ํ–ˆ๋Š”์ง€ ์ด๋‹ค.
for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))

๋จผ์ € 2001๋ฒˆ ๋ฐ˜๋ณต์„ ์ง„ํ–‰ํ•˜๊ณ ,

2000๋ฒˆ print๋ฅผ ํ•˜์ง€๋ง๊ณ  200๋ฒˆ๋งŒ ํ•˜๋„๋ก ํ•˜์ž.

๋…ธ๋“œ๋ฅผ ์‹คํ–‰์‹œ์ผœ์•ผ ๊ทธ ๊ฒฐ๊ณผ๊ฐ’์„ ์•Œ ์ˆ˜ ์žˆ์œผ๋ฏ€๋กœ ๋‚ด๊ฐ€ ์กฐ์‚ฌํ•˜๊ณ  ์‹ถ์€ ๋…ธ๋“œ๋ฅผ ๋ชจ๋‘ session์œผ๋กœ ์‹คํ–‰์‹œํ‚ค๊ณ 

๊ทธ ๊ฒฐ๊ณผ๊ฐ’์„ printํ•˜์ž.

์ „์ฒด์ฝ”๋“œ

import tensorflow as tf
tf.set_random_seed(777)  # for reproducibility

# prepare the data
x_train = [1, 2, 3]
y_train = [1, 2, 3]

# assign random initial values to the parameters
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")

# our hypothesis, XW + b
# we assume the data is linear; this node is the prediction
hypothesis = x_train * W + b

# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - y_train))

# optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)

# create a session
sess = tf.Session()

# initialize the variables
sess.run(tf.global_variables_initializer())

# run the training, with printing
for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))

์ด ๋•Œ, with ๋ฌธ์„ ์‚ฌ์šฉํ•˜์—ฌ ์กฐ๊ธˆ๋” ๊ฐ„๊ฒฐํ•˜๊ฒŒ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋‹ค.

๋˜ํ•œ, ๋‚ด๊ฐ€ ํ•„์š”ํ•œ ๋…ธ๋“œ๋“ค์„ ๋ณ€์ˆ˜๋กœ ์ฒ˜์Œ๋ถ€ํ„ฐ ๋ฐ›์€ ๋’ค,

์ถœ๋ ฅํ•ด์ฃผ๋Š”๊ฒŒ ๋ณด๋‹ค ๊น”๋”ํ•˜๋‹ค.

# Launch the graph in a session.
with tf.Session() as sess:
    # Initializes global variables in the graph.
    sess.run(tf.global_variables_initializer())
 
    # Fit the line
    for step in range(2001):
        _, cost_val, W_val, b_val = sess.run([train, cost, W, b])
 
        if step % 20 == 0:
            print(step, cost_val, W_val, b_val)
  • ๊ฒฐ๊ณผ

    0 3.5240757 [2.1286771] [-0.8523567]
    20 0.19749945 [1.533928] [-1.0505961]
    40 0.15214379 [1.4572546] [-1.0239124]
    60 0.1379325 [1.4308538] [-0.9779527]
    80 0.12527025 [1.4101374] [-0.93219817]
    100 0.11377233 [1.3908179] [-0.8884077]
    120 0.10332986 [1.3724468] [-0.8466577]
    140 0.093845844 [1.3549428] [-0.80686814]
    160 0.08523229 [1.3382617] [-0.7689483]
    180 0.07740932 [1.3223647] [-0.73281056]
    200 0.07030439 [1.3072149] [-0.6983712]
    220 0.06385162 [1.2927768] [-0.6655505]
    240 0.05799109 [1.2790174] [-0.63427216]
    260 0.05266844 [1.2659047] [-0.6044637]
    280 0.047834318 [1.2534081] [-0.57605624]
    300 0.043443877 [1.2414987] [-0.5489836]
    320 0.0394564 [1.2301493] [-0.5231833]
    340 0.035834935 [1.2193329] [-0.49859545]
    360 0.032545824 [1.2090251] [-0.47516325]
    380 0.029558638 [1.1992016] [-0.45283225]
    400 0.026845641 [1.18984] [-0.4315508]
    420 0.024381675 [1.1809182] [-0.41126958]
    440 0.02214382 [1.1724157] [-0.39194146]
    460 0.020111356 [1.1643128] [-0.37352163]
    480 0.018265454 [1.1565907] [-0.35596743]
    500 0.016588978 [1.1492316] [-0.33923826]
    520 0.015066384 [1.1422179] [-0.3232953]
    540 0.01368351 [1.1355343] [-0.30810148]
    560 0.012427575 [1.1291647] [-0.29362184]
    580 0.011286932 [1.1230947] [-0.2798227]
    600 0.010250964 [1.1173096] [-0.26667204]
    620 0.009310094 [1.1117964] [-0.25413945]
    640 0.008455581 [1.1065423] [-0.24219586]
    660 0.0076795053 [1.1015354] [-0.23081362]
    680 0.006974643 [1.0967635] [-0.21996623]
    700 0.0063344706 [1.0922159] [-0.20962858]
    720 0.0057530706 [1.0878822] [-0.19977672]
    740 0.0052250377 [1.0837522] [-0.19038804]
    760 0.004745458 [1.0798159] [-0.18144041]
    780 0.004309906 [1.076065] [-0.17291337]
    800 0.003914324 [1.0724902] [-0.16478711]
    820 0.0035550483 [1.0690835] [-0.1570428]
    840 0.0032287557 [1.0658368] [-0.14966238]
    860 0.0029324207 [1.0627428] [-0.14262886]
    880 0.0026632652 [1.059794] [-0.13592596]
    900 0.0024188235 [1.056984] [-0.12953788]
    920 0.0021968128 [1.0543059] [-0.12345006]
    940 0.001995178 [1.0517538] [-0.11764836]
    960 0.0018120449 [1.0493214] [-0.11211928]
    980 0.0016457299 [1.0470035] [-0.10685005]
    1000 0.0014946823 [1.0447946] [-0.10182849]
    1020 0.0013574976 [1.0426894] [-0.09704296]
    1040 0.001232898 [1.0406833] [-0.09248237]
    1060 0.0011197334 [1.038771] [-0.08813594]
    1080 0.0010169626 [1.0369489] [-0.08399385]
    1100 0.0009236224 [1.0352125] [-0.08004645]
    1120 0.0008388485 [1.0335577] [-0.07628451]
    1140 0.0007618535 [1.0319806] [-0.07269943]
    1160 0.0006919258 [1.0304775] [-0.06928282]
    1180 0.00062842044 [1.0290452] [-0.06602671]
    1200 0.0005707396 [1.0276802] [-0.06292368]
    1220 0.00051835255 [1.0263793] [-0.05996648]
    1240 0.00047077626 [1.0251396] [-0.05714824]
    1260 0.00042756708 [1.0239582] [-0.0544625]
    1280 0.00038832307 [1.0228322] [-0.05190301]
    1300 0.00035268333 [1.0217593] [-0.04946378]
    1320 0.0003203152 [1.0207369] [-0.04713925]
    1340 0.0002909189 [1.0197623] [-0.0449241]
    1360 0.00026421514 [1.0188333] [-0.04281275]
    1380 0.0002399599 [1.0179482] [-0.04080062]
    1400 0.00021793543 [1.0171047] [-0.03888312]
    1420 0.00019793434 [1.0163009] [-0.03705578]
    1440 0.00017976768 [1.0155348] [-0.03531429]
    1460 0.00016326748 [1.0148047] [-0.03365463]
    1480 0.00014828023 [1.0141089] [-0.03207294]
    1500 0.00013467176 [1.0134459] [-0.03056567]
    1520 0.00012231102 [1.0128139] [-0.02912918]
    1540 0.0001110848 [1.0122118] [-0.0277602]
    1560 0.000100889745 [1.0116379] [-0.02645557]
    1580 9.162913e-05 [1.011091] [-0.02521228]
    1600 8.322027e-05 [1.0105698] [-0.02402747]
    1620 7.5580865e-05 [1.0100728] [-0.02289824]
    1640 6.8643785e-05 [1.0095996] [-0.02182201]
    1660 6.234206e-05 [1.0091484] [-0.02079643]
    1680 5.662038e-05 [1.0087185] [-0.01981908]
    1700 5.142322e-05 [1.0083088] [-0.01888768]
    1720 4.6704197e-05 [1.0079182] [-0.01800001]
    1740 4.2417145e-05 [1.0075461] [-0.01715406]
    1760 3.852436e-05 [1.0071915] [-0.01634789]
    1780 3.4988276e-05 [1.0068535] [-0.01557961]
    1800 3.1776715e-05 [1.0065314] [-0.01484741]
    1820 2.8859866e-05 [1.0062244] [-0.0141496]
    1840 2.621177e-05 [1.005932] [-0.01348464]
    1860 2.380544e-05 [1.0056531] [-0.01285094]
    1880 2.1620841e-05 [1.0053875] [-0.012247]
    1900 1.9636196e-05 [1.0051342] [-0.01167146]
    1920 1.7834054e-05 [1.004893] [-0.01112291]
    1940 1.6197106e-05 [1.0046631] [-0.01060018]
    1960 1.4711059e-05 [1.004444] [-0.01010205]
    1980 1.3360998e-05 [1.0042351] [-0.00962736]
    2000 1.21343355e-05 [1.0040361] [-0.00917497]
    

์‹คํ–‰์„ ๋งŽ์ดํ•˜๋ฉด ํ•  ์ˆ˜๋ก, cost๊ฐ€ 0์— ๊ทผ์ ‘ํ•˜๊ณ  ์žˆ์Œ์„ ์•Œ ์ˆ˜ ์žˆ๋‹ค.

Computational Graph

Placeholder๋กœ ๊ตฌํ˜„ํ•ด๋ณด๊ธฐ

import tensorflow as tf
tf.set_random_seed(777)  # for reproducibility
 
# Try to find values for W and b to compute Y = W * X + b
# ์ดˆ๊ธฐ๊ฐ’์€ random์œผ๋กœ ์„ค์ •ํ•œ๋‹ค.
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
 
# ๋ฐ์ดํ„ฐ๋ฅผ ๋จน์ด๋Š” ๊ฒƒ์€ ๋ณดํ†ต placeholder๋ฅผ ์‚ฌ์šฉํ•œ๋‹ค.
X = tf.placeholder(tf.float32, shape=[None])
Y = tf.placeholder(tf.float32, shape=[None])
 
# Our hypothesis is X * W + b
hypothesis = X * W + b
 
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
 
# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
# Launch the graph in a session.
with tf.Session() as sess:
    # Initializes global variables in the graph.
    sess.run(tf.global_variables_initializer())
 
    # Fit the line
    for step in range(2001):
      # ์„ธ์…˜์„ ์‹คํ–‰ ์‹œํ‚ค๊ณ , ๋‚ด๊ฐ€ ์›ํ•˜๋Š” ๊ทธ๋ž˜ํ”„๋ฅผ ์‹คํ–‰ํ•˜๋ฉด์„œ, ๋ฐ์ดํ„ฐ๋ฅผ ๊ฐ™์ด feeding ํ•ด์ค€๋‹ค.
        _, cost_val, W_val, b_val = sess.run(
            [train, cost, W, b], feed_dict={X: [1, 2, 3], Y: [1, 2, 3]}
        )
        if step % 20 == 0:
            print(step, cost_val, W_val, b_val)
 
    # Testing our model
    # ์œ„์—์„œ๋Š” ํ›ˆ๋ จ์„ ์‹œํ‚ค๋ฉฐ ๋ณ€ํ™”ํ•˜๋Š” ๊ฐ’์„ ๋ณด๊ธฐ ์œ„ํ•ด ์—ฌ๋Ÿฌ๊ฐœ์˜ ๊ทธ๋ž˜ํ”„ ๊ฐ’์„ ๋ฐ›์•„ ์ถœ๋ ฅํ–ˆ์ง€๋งŒ
    # ์œ„์—์„œ ํ›ˆ๋ จ์ด ๋œ ์ƒํƒœ์—์„œ ๋‚ด๊ฐ€ ํŠน์ •๊ฐ’์„ ๋„ฃ๊ณ  ๊ฒฐ๊ณผ๋งŒ ๋ณด๊ธฐ ์›ํ•œ๋‹ค๋ฉด
    # hypothesis ๋…ธ๋“œ๋งŒ ์‹คํ–‰ํ•˜๊ณ , ์ด ๋…ธ๋“œ์— ๋„ฃ์–ด์ค„ ๋ฐ์ดํ„ฐ๋งŒ feeding ํ•ด์ฃผ๋ฉด ๋œ๋‹ค.
    # ๋งŒ์•ฝ X, Y์—ญ์‹œ variable๋กœ ์„ ์–ธํ–ˆ๋‹ค๋ฉด ์ฝ”๋“œ ์œ—๋‹จ์—์„œ ๊ฐ’์„ ๋ณ€๊ฒฝํ•ด์ค˜์•ผ ํ•œ๋‹ค.
    # ๊ทธ๋ ‡๊ธฐ ๋•Œ๋ฌธ์— placeholder ๊ฐ์ฒด๋ฅผ ๋งŒ๋“ค์–ด์„œ ์‚ฌ์šฉํ•œ๋‹ค.
    print(sess.run(hypothesis, feed_dict={X: [5]}))
    print(sess.run(hypothesis, feed_dict={X: [2.5]}))
    print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
 
    # Learns best fit W:[ 1.],  b:[ 0]
    """
    0 3.5240757 [2.2086694] [-0.8204183]
    20 0.19749963 [1.5425726] [-1.0498911]
    ...
    1980 1.3360998e-05 [1.0042454] [-0.00965055]
    2000 1.21343355e-05 [1.0040458] [-0.00919707]
    [5.0110054]
    [2.500915]
    [1.4968792 3.5049512]
    """
 
    # Fit the line with new training data
    for step in range(2001):
        _, cost_val, W_val, b_val = sess.run(
            [train, cost, W, b],
            feed_dict={X: [1, 2, 3, 4, 5], Y: [2.1, 3.1, 4.1, 5.1, 6.1]},
        )
        if step % 20 == 0:
            print(step, cost_val, W_val, b_val)
 
    # Testing our model
    print(sess.run(hypothesis, feed_dict={X: [5]}))
    print(sess.run(hypothesis, feed_dict={X: [2.5]}))
    print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
0 1.2035878 [1.0040361] [-0.00917497]
20 0.16904518 [1.2656431] [0.13599995]
...
1980 2.9042917e-07 [1.00035] [1.0987366]
2000 2.5372992e-07 [1.0003271] [1.0988194]
[6.1004534]
[3.5996385]
[2.5993123 4.599964 ]

2000๋ฒˆ ํ›ˆ๋ จํ•œ ๊ฒฐ๊ณผ, W = 1.0003271, b = 1.0988194 ๊ฐ€ ๋‚˜์™”๋‹ค.

๊ทธ๋ฆฌ๊ณ  ์ด ํ›ˆ๋ จ๋œ ๋ชจ๋ธ์— ๋Œ€ํ•ด ๋‹ค๋ฅธ feed๋ฅผ ์คฌ์„ ๋•Œ์˜ hypothesis(์˜ˆ์ธก๊ฐ’)๋„ ์•„๋ž˜์— ์ถœ๋ ฅ๋˜์—ˆ๋‹ค.

๋‹ค๋ฅธ feed์— ๋Œ€ํ•ด์„œ๋„ ํ›ˆ๋ จํ•ด๋ณด์ž.


์ฃผ์„ ๋บ€ ์ฝ”๋“œ

# Lab 2 Linear Regression
import tensorflow as tf
tf.set_random_seed(777)  # for reproducibility
 
# Try to find values for W and b to compute Y = W * X + b
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
 
# placeholders for a tensor that will be always fed using feed_dict
# See http://stackoverflow.com/questions/36693740/
X = tf.placeholder(tf.float32, shape=[None])
Y = tf.placeholder(tf.float32, shape=[None])
 
# Our hypothesis is X * W + b
hypothesis = X * W + b
 
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
 
# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
 
# Launch the graph in a session.
with tf.Session() as sess:
    # Initializes global variables in the graph.
    sess.run(tf.global_variables_initializer())
 
    # Fit the line
    for step in range(2001):
        _, cost_val, W_val, b_val = sess.run(
            [train, cost, W, b], feed_dict={X: [1, 2, 3], Y: [1, 2, 3]}
        )
        if step % 20 == 0:
            print(step, cost_val, W_val, b_val)
 
    # Testing our model
    print(sess.run(hypothesis, feed_dict={X: [5]}))
    print(sess.run(hypothesis, feed_dict={X: [2.5]}))
    print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
 
    # Learns best fit W:[ 1.],  b:[ 0]
    """
    0 3.5240757 [2.2086694] [-0.8204183]
    20 0.19749963 [1.5425726] [-1.0498911]
    ...
    1980 1.3360998e-05 [1.0042454] [-0.00965055]
    2000 1.21343355e-05 [1.0040458] [-0.00919707]
    [5.0110054]
    [2.500915]
    [1.4968792 3.5049512]
    """
 
    # Fit the line with new training data
    for step in range(2001):
        _, cost_val, W_val, b_val = sess.run(
            [train, cost, W, b],
            feed_dict={X: [1, 2, 3, 4, 5], Y: [2.1, 3.1, 4.1, 5.1, 6.1]},
        )
        if step % 20 == 0:
            print(step, cost_val, W_val, b_val)
 
    # Testing our model
    print(sess.run(hypothesis, feed_dict={X: [5]}))
    print(sess.run(hypothesis, feed_dict={X: [2.5]}))
    print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
 
    # Learns best fit W:[ 1.],  b:[ 1.1]
    """
    0 1.2035878 [1.0040361] [-0.00917497]
    20 0.16904518 [1.2656431] [0.13599995]
    ...
    1980 2.9042917e-07 [1.00035] [1.0987366]
    2000 2.5372992e-07 [1.0003271] [1.0988194]
    [6.1004534]
    [3.5996385]
    [2.5993123 4.599964 ]
    """