본문 바로가기
인공지능/CV

[CODE] ANN, K-fold CV, CNN

by EXUPERY 2021. 4. 10.
반응형

https://lh3.googleusercontent.com/proxy/kx6R3xD_1qNKxXUcTO_ahNss5PsiZrAcKgXRWqmiA-WOsh5vWmnaOqmOGRXnKeQHQGNbN5Hti2ar_CNQSiMCuqHmD9HZ-_QepFpN8HFX05omyYcoYOyJAXnkccWX4iwNWr-gzPpPYJ1Ce8737Tu5ghi8cfY5AhoyTl4fMTIRdNHXON44Y2d7UIAv8pnRwq-LHgirdlGW8IwrDuZnhm8eLjYbfnwgitIVVo-w-QemANc_UPARnCHAjgTfxG8OB-oFWQ18LST0vW78w77rUtGJSMhqL0STgWDBAlhkW6C8RRs

1. ANN

## Load Data
# NOTE(review): `df` and `df_label` are created earlier in the notebook — not visible here.
from sklearn.model_selection import train_test_split

X = df
# Labels arrive as {1, 2}; remap class 2 -> 0 so the target is binary {0, 1}.
y = df_label.replace(2, 0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

## Build model
import tensorflow as tf
from tensorflow.keras.metrics import Precision, Recall

# Small feed-forward net: 69 input features -> 12 hidden ReLU units -> 1 sigmoid output.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(12, input_dim=69, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Use the imported Precision metric object rather than the bare string
# 'Precision': the string form is not resolved by older Keras versions,
# and the explicit object is what the import above was for.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', Precision()])

## Train
# 20% of the training split is held out for per-epoch validation.
model.fit(X_train, y_train, epochs=100, validation_split=0.2, batch_size=150)

## Evaluate
model.evaluate(X_test, y_test)

2. K-fold

## Load data
# `from keras.utils import np_utils` was dropped: it was never used below,
# and the `keras.utils.np_utils` module no longer exists in modern Keras.
from keras.datasets import mnist

(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(f"train set : {X_train.shape[0]} 개")
print(f"test set : {X_test.shape[0]} 개")

## 5-Fold Cross Validation
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
from keras.callbacks import EarlyStopping

n_fold = 5
# StratifiedKFold preserves the class distribution of y in every fold.
skf = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=42)

# Per fold: [final val accuracy (4-dp string), train-acc history, val-acc history].
accuracy = []

for train_idx, val_idx in skf.split(X_train, y_train):
  # Rebuild the model from scratch each fold so no weights leak between folds.
  # The redundant `input_dim=784` on the first Dense layer was dropped:
  # Flatten(input_shape=(28, 28)) already fixes the input shape.
  model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)),
                                      tf.keras.layers.Dense(512, activation='relu'),
                                      tf.keras.layers.Dense(256, activation='relu'),
                                      tf.keras.layers.Dropout(0.2),
                                      tf.keras.layers.Dense(256, activation='relu'),
                                      tf.keras.layers.Dense(10, activation='softmax')
                                      ])

  # sparse_categorical_crossentropy: labels stay as integer class ids (0-9).
  model.compile(loss="sparse_categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])

  # `callbacks` expects a list — the original passed a bare callback, which
  # relied on undocumented behaviour. NOTE(review): monitor='loss' stops on
  # *training* loss; 'val_loss' is usually intended — confirm with the author.
  history = model.fit(X_train[train_idx], y_train[train_idx]
                      ,validation_data=(X_train[val_idx], y_train[val_idx])
                      ,epochs=200
                      ,batch_size=50
                      ,callbacks=[EarlyStopping(monitor='loss', patience=10)])

  accuracy_history = history.history['accuracy']
  val_accuracy_history = history.history['val_accuracy']

  # evaluate() returns [loss, accuracy]; keep accuracy as a 4-dp string.
  k_accuracy = f"{model.evaluate(X_train[val_idx], y_train[val_idx])[1]:.4f}"
  accuracy.append([k_accuracy, accuracy_history, val_accuracy_history])
  
## Visualize history
# Overlay training and validation accuracy curves for the five folds,
# one colour per fold (black, red, green, blue, yellow).
import matplotlib.pyplot as plt

plt.figure(figsize=(20, 10))
for fold_idx in range(5):
  colour = 'krgby'[fold_idx]
  train_hist, val_hist = accuracy[fold_idx][1], accuracy[fold_idx][2]
  plt.plot(train_hist, marker='.', c=colour, label=f"{fold_idx + 1}-fold")
  plt.plot(val_hist, marker='.', c=colour)  # validation curve, same colour, no label
plt.ylim(0.9, 1.0)
plt.grid()
plt.legend();

3. CNN

## Import
# Consolidated on tensorflow.keras: the old `keras.layers.convolutional` /
# `.pooling` / `.core` module paths and `keras.utils.np_utils` were removed
# in modern Keras (they raise ImportError), and the duplicated Dense/Conv2D/
# MaxPool2D/Dropout/Flatten imports shadowed each other anyway. np_utils was
# never used below; `to_categorical` covers its role.
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, optimizers
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten, MaxPool2D)
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical

## Hyper-parameters
batch_size = 128
num_classes = 100  # CIFAR-100 has 100 fine-grained labels
epochs = 50

## Load the CIFAR-100 train/test split
(X_train, y_train), (X_test, y_test) = cifar100.load_data()

## Scale pixel values from [0, 255] into [0, 1]
X_train, X_test = X_train / 255.0, X_test / 255.0

## One-hot encode the integer labels into binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)


## Build model
# Three Conv-Conv-Pool blocks (32 -> 64 -> 128 filters) followed by a small
# dense classifier. Only the first layer needs `input_shape`; the redundant
# copies on every later Conv2D were dropped (Keras ignores them there).
model = Sequential()
# 1st block
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd block
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 3rd block
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# dense layers
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
# Use the `num_classes` constant instead of a hard-coded 100 so the output
# head stays in sync with the one-hot labels built above.
model.add(Dense(num_classes, activation='softmax'))  # classification head
# compile
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# summary
model.summary()

## Train
from keras.callbacks import EarlyStopping, ModelCheckpoint

# Stop when validation loss has not improved for 10 consecutive epochs.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

# Keep only the weights of the best (lowest val_loss) epoch.
# The `options=None` argument was dropped — it was deprecated and then
# removed from ModelCheckpoint in recent Keras releases.
checkpoint_filepath = "cifar.hdf5"
save_best = ModelCheckpoint(
    filepath=checkpoint_filepath, monitor='val_loss', verbose=1,
    save_best_only=True, save_weights_only=True, mode='auto',
    save_freq='epoch')

# NOTE(review): the test set doubles as the validation set here, so the
# early-stopping / checkpoint criterion is not independent of the final
# evaluation data — kept as in the original, but worth confirming.
history = model.fit(X_train, y_train
                    , batch_size=batch_size
                    , epochs=epochs
                    , validation_data=(X_test, y_test)
                    , verbose=1
                    , callbacks=[early_stop, save_best])
                    
## Visualize history
import matplotlib.pyplot as plt

f, ax = plt.subplots(2, 1)  # 2 stacked subplots: loss on top, accuracy below

# Read curves from the `history` object returned by fit() — equivalent to
# model.history.history but consistent with the K-fold section above.
ax[0].plot(history.history['loss'], color='b', label='Training Loss')
ax[0].plot(history.history['val_loss'], color='r', label='Validation Loss')
# The original set label= on every curve but never called legend(),
# so the labels were never rendered.
ax[0].legend()

ax[1].plot(history.history['accuracy'], color='b', label='Training  Accuracy')
ax[1].plot(history.history['val_accuracy'], color='r', label='Validation Accuracy')
ax[1].legend()

 

반응형

'인공지능 > CV' 카테고리의 다른 글

객체탐지 (Object Detection) 2. YOLO !! (v1~v3)  (2) 2021.05.01
객체탐지 (Object Detection) 1. YOLO 이전 까지 흐름  (0) 2021.05.01
MASK RCNN 실행시 버전오류  (0) 2021.04.20
[CODE] EfficientNetB7  (0) 2021.04.10
[CODE] VGG19  (0) 2021.04.10

댓글