
Keras Cat Dog Classification - 8. Getting Started with Deep Learning - Overfitting

Bigcat 2019. 6. 13. 23:20

From installation to actual classification with Keras

Working through the Cat and Dog dataset end to end

 

Previous post: 2019/06/13 - [주제를 딱히 정하기 싫을때] - Keras Cat Dog 분류 - 7. 딥러닝 시작하기 - 텐션보드 사용하기 (redapply.tistory.com)

 


11 Overfitting

  • For the explanation, see Chapter 6.

  • MaxPooling2D applied

  • Dropout applied

def cnn_api2(input_shape):
    # Same structure as before, but with MaxPooling2D and Dropout after each convolution block.
    input_tensor = Input(input_shape, name="input")

    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation='relu')(input_tensor)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(units=1024, activation='relu')(x)
    x = layers.Dropout(rate=0.25)(x)

    # Single sigmoid unit for the binary cat/dog decision.
    output_tensor = layers.Dense(units=no_classes, activation='sigmoid', name="output")(x)

    model = models.Model([input_tensor], [output_tensor])

    model.compile(loss=losses.binary_crossentropy,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=['acc'])
    return model
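
  • (Not in the original post) A quick sanity check on the new architecture is to print the model summary; this sketch assumes the imports and the input_shape / no_classes values from the full code below have already been run.

check_model = cnn_api2(input_shape)   # input_shape = (150, 150, 3), no_classes = 1, as in the full code below
check_model.summary()                 # feature maps shrink 150x150 -> 75x75 -> 37x37 -> 18x18 across the three pooling layers
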
  • Change the project name so that the saved model can be identified.

project_name = 'dog_cat_CNN_api2_model'

 

  • Switch to the newly defined model as well.

newType_model = cnn_api2(input_shape)

 

 

  • The full code is as follows.

from datetime import datetime
import os
import keras

save_dir = './my_log'

if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

    
project_name = 'dog_cat_CNN_api2_model'

def save_file():
    time = datetime.today()
    yy = time.year
    mon = time.month
    dd = time.day
    hh = time.hour
    mm = time.minute
    sec = time.second
    time_name = str(yy) +  str(mon) + str(dd) + str(hh) + str(mm) +  str(sec) +'_my_' + project_name + '_model.h5'
    file_name = os.path.join(save_dir,time_name)
    return file_name

callbacks = [
    
    keras.callbacks.TensorBoard(
    log_dir = save_dir,
    write_graph=True,
    write_images=True
    ),
    
    keras.callbacks.EarlyStopping(
    monitor = 'val_acc',
        patience=10,
    ),
    keras.callbacks.ModelCheckpoint(
    filepath= save_file(),
    monitor = 'val_loss',
    save_best_only = True,
    )
]

from keras import Input
from keras import layers ,models, losses ,optimizers

batch_size = 256
no_classes = 1
epochs = 50
image_height, image_width = 150,150
input_shape = (image_height,image_width,3)

# MaxPooling2D and Dropout applied

def cnn_api2(input_shape):

    input_tensor = Input(input_shape, name="input")

    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation='relu')(input_tensor)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(units=1024, activation='relu')(x)
    x = layers.Dropout(rate=0.25)(x)

    output_tensor = layers.Dense(units=no_classes, activation='sigmoid', name="output")(x)

    model = models.Model([input_tensor], [output_tensor])

    model.compile(loss=losses.binary_crossentropy,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=['acc'])
    return model

from keras.preprocessing.image import ImageDataGenerator
from PIL import Image

train_datagen = ImageDataGenerator(rescale = 1./255)
val_datagen = ImageDataGenerator(rescale = 1./255) # the validation data is only rescaled, no augmentation

train_generator = train_datagen.flow_from_directory(
    os.path.join(copy_train_path,"train"),   # copy_train_path: dataset directory created in the earlier data-preparation post
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = "binary"
    )
validation_generator = val_datagen.flow_from_directory(
    os.path.join(copy_train_path,"validation"),
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = "binary"
    )

newType_model = cnn_api2(input_shape)
hist = newType_model.fit_generator(train_generator, steps_per_epoch = 20000//batch_size, epochs= epochs,
                                  validation_data = validation_generator, validation_steps = 5000//batch_size,
                                  callbacks = callbacks)


import matplotlib.pyplot as plt
train_acc = hist.history['acc']
val_acc = hist.history['val_acc']

train_loss = hist.history['loss']
val_loss = hist.history['val_loss']
epochs = range(1,len(train_acc)+1)

plt.plot(epochs,train_acc,'bo',label='Training acc')
plt.plot(epochs,val_acc,'r',label='Val acc')
plt.title('Training and Val accuracy')
plt.legend()

plt.figure()
plt.plot(epochs,train_loss,'bo',label='Training loss')
plt.plot(epochs,val_loss,'r',label='Val loss')
plt.title('Training and Val loss')
plt.legend()

plt.show()

(Output plots: Training and Val accuracy / Training and Val loss)

  • Accuracy went up slightly compared to the previous model, but overfitting remains (one quick way to check the gap is sketched after this list).

  • Next, let's try adding more layers.
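
  • (Optional, not in the original post) Before changing the architecture, the train/validation gap can be read straight off the hist object returned by fit_generator above; a large and growing gap is the usual sign of overfitting.

# Optional check, assuming `hist` is the History object returned by fit_generator above.
for epoch, (tr, va) in enumerate(zip(hist.history['acc'], hist.history['val_acc']), start=1):
    print("epoch %2d  train_acc=%.3f  val_acc=%.3f  gap=%.3f" % (epoch, tr, va, tr - va))
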

 

 

 

What changed

  • Additional model layers

def cnn_api3(input_shape):
    # Same as cnn_api2, but with two Conv2D layers per block before each pooling step.
    input_tensor = Input(input_shape, name="input")

    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation='relu')(input_tensor)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(units=1024, activation='relu')(x)
    x = layers.Dropout(rate=0.25)(x)

    output_tensor = layers.Dense(units=no_classes, activation='sigmoid', name="output")(x)

    model = models.Model([input_tensor], [output_tensor])

    model.compile(loss=losses.binary_crossentropy,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=['acc'])
    return model
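
  • (Optional, not in the original post) One reason extra conv layers may not help much on their own is that the Flatten -> Dense(1024) head still holds the vast majority of the parameters; a quick comparison, assuming cnn_api2 and cnn_api3 from this post are both defined in the session:

# Compare total parameter counts of the two architectures (both heads flatten an 18x18x128 feature map).
print("cnn_api2 parameters:", cnn_api2(input_shape).count_params())
print("cnn_api3 parameters:", cnn_api3(input_shape).count_params())
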

 

  • Change the project name so that the saved model can be identified.

project_name = 'dog_cat_CNN_api3_model'

 

  • Switch to the newly defined model as well.

newType_model = cnn_api3(input_shape)

 

 

Full code with the changes

from datetime import datetime
import os
import keras

save_dir = './my_log'

if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

    
project_name = 'dog_cat_CNN_api3_model'

def save_file():
    time = datetime.today()
    yy = time.year
    mon = time.month
    dd = time.day
    hh = time.hour
    mm = time.minute
    sec = time.second
    time_name = str(yy) +  str(mon) + str(dd) + str(hh) + str(mm) +  str(sec) +'_my_' + project_name + '_model.h5'
    file_name = os.path.join(save_dir,time_name)
    return file_name

callbacks = [
    
    keras.callbacks.TensorBoard(
    log_dir = save_dir,
    write_graph=True,
    write_images=True
    ),
    
    keras.callbacks.EarlyStopping(
    monitor = 'val_acc',
        patience=10,
    ),
    keras.callbacks.ModelCheckpoint(
    filepath= save_file(),
    monitor = 'val_loss',
    save_best_only = True,
    )
]


from keras import Input
from keras import layers ,models, losses ,optimizers

batch_size = 256
no_classes = 1
epochs = 50
image_height, image_width = 150,150
input_shape = (image_height,image_width,3)

def cnn_api3(input_shape):

    input_tensor = Input(input_shape, name="input")

    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation='relu')(input_tensor)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Dropout(rate=0.25)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(units=1024, activation='relu')(x)
    x = layers.Dropout(rate=0.25)(x)

    output_tensor = layers.Dense(units=no_classes, activation='sigmoid', name="output")(x)

    model = models.Model([input_tensor], [output_tensor])

    model.compile(loss=losses.binary_crossentropy,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=['acc'])
    return model


from keras.preprocessing.image import ImageDataGenerator
from PIL import Image

train_datagen = ImageDataGenerator(rescale = 1./255)
val_datagen = ImageDataGenerator(rescale = 1./255) # the validation data is only rescaled, no augmentation

train_generator = train_datagen.flow_from_directory(
    os.path.join(copy_train_path,"train"),   # copy_train_path: dataset directory created in the earlier data-preparation post
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = "binary"
    )
validation_generator = val_datagen.flow_from_directory(
    os.path.join(copy_train_path,"validation"),
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = "binary"
    )

newType_model = cnn_api3(input_shape)
hist = newType_model.fit_generator(train_generator, steps_per_epoch = 20000//batch_size, epochs= epochs,
                                  validation_data = validation_generator, validation_steps = 5000//batch_size,
                                  callbacks = callbacks)


import matplotlib.pyplot as plt
train_acc = hist.history['acc']
val_acc = hist.history['val_acc']

train_loss = hist.history['loss']
val_loss = hist.history['val_loss']
epochs = range(1,len(train_acc)+1)

plt.plot(epochs,train_acc,'bo',label='Training acc')
plt.plot(epochs,val_acc,'r',label='Val acc')
plt.title('Training and Val accuracy')
plt.legend()

plt.figure()
plt.plot(epochs,train_loss,'bo',label='Training loss')
plt.plot(epochs,val_loss,'r',label='Val loss')
plt.title('Training and Val loss')
plt.legend()

plt.show()

(Output plots: Training and Val accuracy / Training and Val loss)

Hmm. It doesn't look particularly better.

If anything, the accuracy actually dropped.

And training was stopped early, after only 36 epochs, by the EarlyStopping callback.
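
Since EarlyStopping cut the run short and ModelCheckpoint kept only the best weights by val_loss, it can be worth evaluating that saved checkpoint rather than the final in-memory model. A minimal sketch, not from the original post, assuming the training session above is still live (validation_generator and batch_size defined) and using the filename pattern produced by save_file():

import glob, os
from keras.models import load_model

# Pick the most recent checkpoint that ModelCheckpoint wrote to ./my_log.
latest_ckpt = max(glob.glob('./my_log/*_my_dog_cat_CNN_api3_model_model.h5'), key=os.path.getmtime)
best_model = load_model(latest_ckpt)

# Evaluate the best checkpoint on the validation generator (Keras 2.x API).
val_loss, val_acc = best_model.evaluate_generator(validation_generator, steps=5000 // batch_size)
print('best checkpoint: %s  val_loss=%.4f  val_acc=%.4f' % (latest_ckpt, val_loss, val_acc))
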

 

In the next section, we'll try to rein in the overfitting with data augmentation (a preview sketch follows below).
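
As a preview (the exact settings in the next post may differ), one typical ImageDataGenerator augmentation setup looks like this; only the training generator is augmented, while the validation generator stays rescale-only as above:

from keras.preprocessing.image import ImageDataGenerator

# Illustrative augmentation values, not necessarily the ones used in the next post.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,        # random rotations up to 40 degrees
    width_shift_range=0.2,    # random horizontal shifts (fraction of width)
    height_shift_range=0.2,   # random vertical shifts (fraction of height)
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,     # mirror images left/right
    fill_mode='nearest')
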

 

Attached notebook: Keras Cat Dog 분류 - 딥러닝 시작하기 .ipynb (0.15MB)