"""MNIST ensemble model exercise (fill in the blanks).

1. Get a high score on Kaggle using an ensemble of models.
Download the data from https://www.kaggle.com/c/digit-recognizer
Ensemble used: Inception-style model + residual model + basic CNN model.
"""






# Third-party scientific stack, then Keras (grouped per PEP 8).
import matplotlib.pyplot as plt  # fix: bare `matplotlib` has no plotting API
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf

import keras
from keras import layers, models
# Load the Kaggle digit-recognizer CSVs (one row per image, 784 pixel columns).
test = pd.read_csv("test.csv")
train = pd.read_csv("train.csv")

# Separate the label column from the pixel columns.
train_target = train['label']
train.drop(columns='label', inplace=True)

# Reshape each 784-pixel row into a 28x28 image. Vectorized with NumPy,
# replacing the original (un-indented, and O(n) Python-level) per-row loops.
train_data = train.to_numpy().reshape(-1, 28, 28)
test_data = test.to_numpy().reshape(-1, 28, 28)

# Blanks filled: normalize pixel values from [0, 255] to [0, 1].
train_data = train_data / 255.0
test_data = test_data / 255.0

# Add the single grayscale channel axis that Conv2D expects: (N, 28, 28, 1).
train_data = train_data.reshape(-1, 28, 28, 1)
test_data = test_data.reshape(-1, 28, 28, 1)
# Stop training as soon as the monitored metric stops improving.
callbacks_list = [
    keras.callbacks.EarlyStopping(
        # Blank filled: monitor validation accuracy, per the exercise hint.
        # NOTE(review): the fit() calls in this script pass no validation
        # data, so 'val_accuracy' will not exist at runtime — either add
        # validation_split to fit() or monitor 'accuracy' instead. Confirm.
        monitor='val_accuracy',
        patience=1,  # stop after 1 epoch without improvement
    )
]
# Basic CNN: two 3x3 convs -> 2x2 max-pool -> dense head with 10-way softmax.
CNN_model = models.Sequential()
CNN_model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), padding='Same',
                            activation='relu', input_shape=(28, 28, 1)))
CNN_model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), padding='Same',
                            activation='relu'))
CNN_model.add(layers.MaxPool2D(pool_size=(2, 2)))
CNN_model.add(layers.Dropout(0.2))
CNN_model.add(layers.Flatten())  # blank filled: flatten before the dense head
CNN_model.add(layers.Dense(64, activation="relu"))
CNN_model.add(layers.Dropout(0.5))
# Blanks filled: 10 classes (digits 0-9) with softmax, matching the
# sparse_categorical_crossentropy loss. Also fixes the missing closing
# parenthesis on the original line.
CNN_model.add(layers.Dense(10, activation="softmax"))
CNN_model.compile(optimizer="rmsprop",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
CNN_model.fit(train_data,
              train_target,
              epochs=10,
              batch_size=32,
              callbacks=callbacks_list)
from keras import Input

# Shared input tensor, also reused by the residual model below.
x = Input(shape=(28, 28, 1))

# Inception-style block: three parallel branches. All branches must end at
# the same spatial size (14x14) so they can be concatenated on the channel
# axis.
# branch_a: 1x1 conv, stride 2 -> (14, 14, 64)
branch_a = layers.Conv2D(filters=64, kernel_size=(1, 1), padding='Same',
                         strides=2, activation='relu')(x)
# branch_b: 1x1 conv, then a strided 3x3 conv -> (14, 14, 64)
branch_b = layers.Conv2D(filters=64, kernel_size=(1, 1), padding='Same',
                         activation='relu')(x)
branch_b = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='Same',
                         strides=2, activation='relu')(branch_b)
# branch_c: 2x2 max-pool (stride defaults to the pool size -> 14x14),
# then a 1x1 conv that keeps the spatial size.
branch_c = layers.MaxPool2D(2)(x)
branch_c = layers.Conv2D(filters=64, kernel_size=(1, 1), padding='Same',
                         activation='relu')(branch_c)
# Q. Could branch_b's second Conv2D use strides=4? Explain why or why not.
# A. No: strides=4 would produce a 7x7 output while the other branches are
#    14x14, so the channel-axis concatenation below would fail with a
#    spatial-shape mismatch.
output = layers.concatenate([branch_a, branch_b, branch_c], axis=-1)
output = layers.Flatten()(output)  # blank filled: flatten before dense head
output = layers.Dense(256, activation="relu")(output)
output = layers.Dropout(0.5)(output)
# Blanks filled: 10 digit classes with softmax for sparse CE loss.
output = layers.Dense(10, activation="softmax")(output)
Inception_model = models.Model(x, output)
Inception_model.compile(optimizer='rmsprop',
                        loss='sparse_categorical_crossentropy',
                        metrics=["accuracy"])
Inception_model.fit(train_data,
                    train_target,
                    epochs=10,
                    batch_size=16,
                    callbacks=callbacks_list)
# Residual model: both convs output 64 channels so that the skip connection
# (`add`) combines tensors of identical shape (14, 14, 64).
Residual_x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='Same',
                           strides=2, activation='relu')(x)
Residual_x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='Same',
                           activation='relu')(Residual_x)
Residual_y = layers.Dropout(0.2)(Residual_x)
# Dense(64) applies along the channel axis, preserving the (14, 14, 64) shape.
Residual_y = layers.Dense(64, activation="relu")(Residual_y)
# Q. Could we add `Residual_y = layers.MaxPooling2D(2)(Residual_y)` here?
#    Explain your reasoning.
# A. No: pooling would shrink Residual_y to (7, 7, 64) while Residual_x stays
#    (14, 14, 64), so the element-wise `add` below would fail on mismatched
#    shapes.
Residual_y = layers.add([Residual_y, Residual_x])
Residual_output = layers.Flatten()(Residual_y)  # blank filled: flatten
Residual_output = layers.Dense(128, activation="relu")(Residual_output)
Residual_output = layers.Dropout(0.5)(Residual_output)
# Blanks filled: 10 digit classes with softmax for sparse CE loss.
Residual_output = layers.Dense(10, activation="softmax")(Residual_output)
Residual_model = models.Model(x, Residual_output)
Residual_model.compile(optimizer='rmsprop',
                       loss='sparse_categorical_crossentropy',
                       metrics=["accuracy"])
Residual_model.fit(train_data,
                   train_target,
                   epochs=10,
                   batch_size=16,
                   callbacks=callbacks_list)
# Get each model's class-probability predictions for the test set.
predict_a = CNN_model.predict(test_data)
predict_b = Inception_model.predict(test_data)
predict_c = Residual_model.predict(test_data)
# Blank filled: equal-weight (simple) average of the three prediction arrays.
ensemble = (predict_a + predict_b + predict_c) / 3
print(ensemble)