이론문제 1번 : 1, 4번

  1. GoogLeNet은 VGG19보다 적은 층으로 구성되어 있고 1x1 크기의 커널을 이용해 특성 개수를 줄여 VGG보다 성능이 더 좋다.

→ GoogLeNet은 22개의 층, VGG19는 19개의 층으로 구성되어 있으므로, GoogLeNet이 VGG19보다 더 많은 층으로 구성된다.

  1. CNN의 대표적인 모델은 VGGNet, AlexNet, GoogLeNet, ResNet 순으로 개발되었다. → AlexNet, VGGNet 순입니다. 참고: https://junklee.tistory.com/19

이론문제 2번 : 1,5번

1. convolutional neural network에서 filter와 kernel은 같은 말로 사용하지만 뉴런은 다른 의미로 사용된다. → p.425: 합성곱 신경망에서는 완전연결신경망과 달리 뉴런을 필터라고 부릅니다. 혹은 커널이라고도 부릅니다.

5. 합성곱에서는 활성화 출력이란 표현을 잘 쓴다. → p.429: ...일반적으로 특성맵은 활성화 함수를 통과한 값을 나타냅니다. 합성곱에서는 활성화 출력이란 표현을 잘 쓰지 않습니다.

실습 3번

방법 1.

#!pip install tensorflow-datasets
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

#load dataset
dataset_name = 'horses_or_humans'
(train_raw, val_raw, test_raw), info = tfds.load(name=dataset_name,
                    split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
                    with_info=True,
                    as_supervised=True,
                    shuffle_files=True,
                    batch_size = None)

#resize img
def img_resize(image, label):
    image = tf.cast(image, tf.float32)
    image = image / 255.0
    image = tf.image.resize(image, (224, 224))
    return image, label

train = train_raw.map(img_resize)
val = val_raw.map(img_resize)
test = test_raw.map(img_resize)

#model
model = keras.Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=1000,activation="relu"))
model.add(Dense(units=1000,activation='softmax'))

model.summary()

#fit model
opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy',metrics='accuracy')
checkpoint_cb = keras.callbacks.ModelCheckpoint('best-cnn-model.h5',
                                               save_best_only=True)
early_stopping_cb = keras.callbacks.EarlyStopping(patience=2,
                                                 restore_best_weights=True)

history = model.fit(train, epochs=20,
                   validation_data = val,
                    batch_size=16,
                   callbacks=[checkpoint_cb, early_stopping_cb])

#fit result
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'val'])
plt.show()
model.evaluate(test)

방법 2.

#!pip install tensorflow-datasets
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

#load dataset
dataset_name = 'horses_or_humans'
(train_raw, val_raw, test_raw), info = tfds.load(name=dataset_name,
                    split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
                    with_info=True,
                    as_supervised=True,
                    shuffle_files=True,
                    batch_size = None)

#resize img
def img_resize(image, label):
    image = tf.cast(image, tf.float32)
    image = image / 255.0
    image = tf.image.resize(image, (224, 224))
    return image, label

train = train_raw.map(img_resize)
val = val_raw.map(img_resize)
test = test_raw.map(img_resize)

print(info)

train_data = np.array([])
train_target = np.array([])
val_data = np.array([])
val_target = np.array([])
test_data = np.array([])
test_target = np.array([])

for inputs in tfds.as_numpy(train):
  data, target = inputs
  train_data = np.append([train_data], data)
  train_target = np.append([train_target], target)

for inputs in tfds.as_numpy(val):
  data, target = inputs
  val_data = np.append([val_data], data)
  val_target = np.append([val_target], target)

for inputs in tfds.as_numpy(test):
  data, target = inputs
  test_data = np.append([test_data], data)
  test_target = np.append([test_target], target)

train_data = train_data.reshape(-1, 224, 224, 3);
val_data = val_data.reshape(-1, 224, 224, 3);
test_data = test_data.reshape(-1, 224, 224, 3);

print(test_data[:3])

#model
model = keras.Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=1000,activation="relu"))
model.add(Dense(units=1000,activation='softmax'))

model.summary()

#fit model
opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics='accuracy')
checkpoint_cb = keras.callbacks.ModelCheckpoint('best-cnn-model.h5',
                                               save_best_only=True)
early_stopping_cb = keras.callbacks.EarlyStopping(patience=2,
                                                 restore_best_weights=True)
history = model.fit(train_data, train_target, epochs=20,
                   validation_data = (val_data, val_target),
                    batch_size=16,
                   callbacks=[checkpoint_cb, early_stopping_cb])

#fit result
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'val'])
plt.show()

#model test
model.evaluate(test_data, test_target)

Untitled

실습 3번: 이 방법대로 데이터셋을 수정하지 않고 학습하면 model.fit 할 때 데이터 크기가 맞지 않는다는 오류가 나는데, 해결 방법 아시는 분 있으신가요? 저는 numpy 배열로 변환해서 훈련했습니다. _ 채현우 / 방법 2에 numpy가 임포트되지 않아 import 명령문을 추가해 두었습니다. _ 김선엽

이론문제 4번 : 3, 5번