Practice Problems

Problem 1

Not solved.

Problem 2

import numpy as np
import matplotlib.pyplot as plt

# Draw 1,000 points per class from two 2D Gaussians that share a covariance
# matrix but have different means, so each class forms its own cloud.
num_sample_per_class = 1000
sample1 = np.random.multivariate_normal(
    mean=[5, 1],
    cov=[[1, 0.5], [0.5, 1]],
    size=num_sample_per_class
)
sample2 = np.random.multivariate_normal(
    mean=[1, 5],
    cov=[[1, 0.5], [0.5, 1]],
    size=num_sample_per_class
)

# Stack the two classes into a (2000, 2) input array and build matching
# (2000, 1) labels: 0 for the first class, 1 for the second.
inputs = np.vstack((sample1, sample2)).astype('float32')
targets = np.vstack((np.zeros((num_sample_per_class, 1), dtype='float32'),
                     np.ones((num_sample_per_class, 1), dtype='float32')))

# Scatter plot of the samples, colored by class label.
plt.scatter(inputs[:, 0], inputs[:, 1], c=targets[:, 0])
plt.show()

[Screenshot: scatter plot of the two generated sample clouds, colored by class label]
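The exercise stops at generating and plotting the data. As an optional sketch (not part of the original answer), the same tf.GradientTape pattern used in Problem 3 below could be applied to fit a linear classifier to these points; the names Wc and bc, the learning rate of 0.1, and the 40 steps are assumptions made for illustration.

import tensorflow as tf

# Affine classifier: one weight per input feature plus a bias.
Wc = tf.Variable(tf.random.uniform(shape=(2, 1)))
bc = tf.Variable(tf.zeros(shape=(1,)))

def classifier(points):
    # Outputs above 0.5 can be read as class 1, below 0.5 as class 0.
    return tf.matmul(points, Wc) + bc

def mse(targets, predictions):
    return tf.reduce_mean(tf.square(targets - predictions))

# A few steps of plain gradient descent, mirroring training_step in Problem 3.
for step in range(40):
    with tf.GradientTape() as tape:
        loss = mse(targets, classifier(inputs))
    grad_W, grad_b = tape.gradient(loss, [Wc, bc])
    Wc.assign_sub(0.1 * grad_W)
    bc.assign_sub(0.1 * grad_b)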

Problem 3

import tensorflow as tf
import numpy as np
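# Training data: scalar inputs with one target value per input.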
inputs = [16.1,9.4,10.2,24.2,9.6,2.3,22.4,7,10.3,15.7,5.1,24.6,12.5,13.2,23.6,15.4,5.8,7.3,13.9,21.7,21.5,17.7,0.5,6.1,23.5,8.6,7.4,9.9,10,6,17.4,3,2.4,21.7,8.9,24.9,13.2,1.6,15.5,10.8,19,4.8,8.8,0.9,21.9,7.9,20.9,16.9,19.4,4.8,4,17.8,8.2,22.4,21.5,18.7,0.5,16.3,21.5,6.4,25,4.7,21.7,18,9.4,7.7,17.5,17.2,2.4,20.2,7,21.4,22.7,13.5,22,0.4,11.8,3.3,6.8,6.5,2.6,17.2,15,18.4,12.8,18,13.5,14.4,19,24.1,14,11.6,17.4,23.7,12.3,24.7,3.8,24.1,7.3,14.1]
targets = [307.2,284.3,288.5,337.8,284.7,262.5,324.3,280.1,287.1,307.3,272.2,337.5,297.6,298.5,331.2,304.2,274.4,279.6,301.4,326.4,328.4,313.7,257.5,274.1,329.9,282.7,277.9,287.3,286.2,273.4,309.7,265.4,263.4,322.6,285,338.2,297.5,262.3,305,290.9,317.5,271.1,284.6,258.6,329.2,281.4,324.9,310.7,320.2,272.8,267.8,313.6,283.5,326.4,326.3,318.1,257.9,306.3,327.3,275.8,336.9,270.9,327.1,313,285.6,279.6,313.4,309.1,263.4,318.4,277.3,326,332.7,302,326.4,257.9,292.2,267.3,277.7,276.1,265.4,307.4,305.5,316.3,295.3,311.5,298.4,299.3,319.1,333.7,298.7,294.3,310.9,330.7,296.3,330.9,268,333.5,277.4,303.5]

# Model parameters: slope W and intercept b, initialized to zero.
W = tf.Variable(0.)
b = tf.Variable(0.)

# Forward pass: a linear model, prediction = W * inputs + b.
def model(inputs):
    return W * inputs + b

# Mean squared error over all samples.
def square_loss(targets, predictions):
    per_sample_losses = tf.square(targets - predictions)
    return tf.reduce_mean(per_sample_losses)

learning_rate = 0.0025

# One training step: compute the loss under the tape, get the gradients of
# the loss with respect to W and b, and update both by gradient descent.
def training_step(inputs, targets):
    with tf.GradientTape() as tape:
        predictions = model(inputs)
        loss = square_loss(targets, predictions)
    grad_loss_wrt_W, grad_loss_wrt_b = tape.gradient(loss, [W, b])
    W.assign_sub(grad_loss_wrt_W * learning_rate)
    b.assign_sub(grad_loss_wrt_b * learning_rate)
    return loss

# Run 5,000 steps of gradient descent on the full dataset.
for step in range(5000):
    training_step(inputs, targets)

# Plot the data and the fitted line y = W * x + b.
import matplotlib.pyplot as plt
x = [0, 25]
y = W * x + b
plt.plot(x, y, "-r")
plt.scatter(inputs, targets)
plt.show()

[Screenshot: scatter plot of the data with the fitted regression line]
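As a quick sanity check (not part of the original answer), the learned parameters and final loss can be printed and compared with NumPy's closed-form least-squares line; the use of np.polyfit here is an assumption for comparison, not something the exercise asks for.

# Learned parameters and final training loss.
print("W =", W.numpy(), "b =", b.numpy())
print("final MSE:", square_loss(targets, model(inputs)).numpy())

# Closed-form degree-1 least-squares fit for comparison.
slope, intercept = np.polyfit(inputs, targets, 1)
print("np.polyfit:", slope, intercept)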

Theory Problems

Problem 1

3

Problem 2

1, 2, 4

Problem 3

2

Problem 4

5

Problem 5

2