# 5. EfficientNetB7

## Import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import cv2
import albumentations as albu
import tensorflow as tf
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# the classification head is built with standalone Keras to match the efficientnet.keras backbone
import keras
from keras.models import Sequential, load_model
from keras.layers import Dropout, Dense, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau
import efficientnet.keras as efn
## Settings
n_classes = 100
epochs = 15
batch_size = 8
## Split data
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
# Pixel rescaling to [0, 1] is done inside the DataGenerator below,
# so the raw uint8 images are kept here to avoid normalizing twice.
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)

# a single stratified 80/20 train/validation split
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=123)
for train_index, val_index in sss.split(X_train, y_train):
    X_train_data, X_val_data = X_train[train_index], X_train[val_index]
    y_train_data, y_val_data = y_train[train_index], y_train[val_index]

print("Number of training samples: ", X_train_data.shape[0])
print("Number of validation samples: ", X_val_data.shape[0])
## Data manipulation
def resize_img(img, shape):
    return cv2.resize(img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)

class DataGenerator(keras.utils.Sequence):
    def __init__(self, images, labels=None, mode='fit', batch_size=batch_size,
                 dim=(32, 32), channels=3, n_classes=n_classes, shuffle=True, augment=False):
        # initializing the configuration of the generator
        self.images = images
        self.labels = labels
        self.mode = mode
        self.batch_size = batch_size
        self.dim = dim
        self.channels = channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.augment = augment
        self.on_epoch_end()

    # method to be called after every epoch
    def on_epoch_end(self):
        self.indexes = np.arange(self.images.shape[0])
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    # return the number of steps in an epoch using samples & batch size
    def __len__(self):
        return int(np.floor(len(self.images) / self.batch_size))

    # this method is called with the batch number as an argument to obtain a given batch of data
    def __getitem__(self, index):
        # generate indexes of the batch
        batch_indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # generate mini-batch of X
        X = np.empty((self.batch_size, *self.dim, self.channels))
        for i, ID in enumerate(batch_indexes):
            # generate pre-processed image
            img = self.images[ID]
            # image rescaling
            img = img.astype(np.float32) / 255.
            # resizing as per new dimensions
            img = resize_img(img, self.dim)
            X[i] = img
        # generate mini-batch of y
        if self.mode == 'fit':
            y = self.labels[batch_indexes]
            # augmentation on the training dataset
            if self.augment == True:
                X = self.__augment_batch(X)
            return X, y
        elif self.mode == 'predict':
            return X
        else:
            raise AttributeError("The mode should be set to either 'fit' or 'predict'.")

    # augmentation for one image
    def __random_transform(self, img):
        composition = albu.Compose([albu.HorizontalFlip(p=0.5),
                                    albu.VerticalFlip(p=0.5),
                                    albu.GridDistortion(p=0.2),
                                    albu.ElasticTransform(p=0.2)])
        return composition(image=img)['image']

    # augmentation for a batch of images
    def __augment_batch(self, img_batch):
        for i in range(img_batch.shape[0]):
            img_batch[i] = self.__random_transform(img_batch[i])
        return img_batch

# generator objects
train_data_generator = DataGenerator(X_train_data, y_train_data, augment=True)
valid_data_generator = DataGenerator(X_val_data, y_val_data, augment=False)
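As a quick sanity check (not part of the original post), indexing the training generator directly shows the batch shapes the model will receive:

# one mini-batch from the training Sequence
X_batch, y_batch = train_data_generator[0]
print(X_batch.shape, y_batch.shape)  # expected: (8, 32, 32, 3) (8, 100)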
## Build model
# EfficientNetB7 backbone from the efficientnet package (include_top=False drops the ImageNet classifier head)
efnb7 = efn.EfficientNetB7(weights='imagenet', include_top=False, input_shape=(32, 32, 3))

model = Sequential()
model.add(efnb7)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
model.summary()
## Train
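The fit() call below refers to early_stop and rlrop and assumes the model has already been compiled, but neither step appears in the snippet. The following is a minimal reconstruction; the loss and optimizer follow the imports above, while the monitor targets, patience, and factor values are assumptions rather than values from the original post:

# reconstructed setup: compile the model and define the callbacks used in fit()
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])  # Adam defaults; tune as needed
# stop early and reduce the learning rate when the validation loss plateaus (assumed settings)
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1)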
model_history = model.fit(train_data_generator, validation_data=valid_data_generator, callbacks=[early_stop, rlrop], verbose=1, epochs=epochs)
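The snippet ends at training, but the sklearn metrics imported at the top suggest a test-set evaluation. A minimal sketch using the generator's 'predict' mode is shown below; the test_data_generator name and the use of model.predict with a Sequence (consistent with the fit() call above) are illustrative, not taken from the original post:

# score the test set with the DataGenerator in 'predict' mode; shuffle=False keeps predictions aligned with y_test
test_data_generator = DataGenerator(X_test, mode='predict', shuffle=False, augment=False)
y_pred = model.predict(test_data_generator, verbose=1)
y_pred_classes = np.argmax(y_pred, axis=1)
# __len__ floors to whole batches, so trim the labels to the number of predictions returned
y_true_classes = np.argmax(y_test, axis=1)[:len(y_pred_classes)]
print("Test accuracy:", accuracy_score(y_true_classes, y_pred_classes))
print(classification_report(y_true_classes, y_pred_classes))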