New CNN Model #31

Open · wants to merge 1 commit into master
Binary file added EmoPy/models/gcp-cnn.h5
Binary file not shown.
85 changes: 85 additions & 0 deletions EmoPy/src/customLayer.py
@@ -0,0 +1,85 @@
import tensorflow as tf

from keras import backend as K
from keras.engine import Layer

class SliceLayer(Layer):
    """Extracts a contiguous slice of `items` channels, starting at `start`,
    from a channels-last input tensor."""
def __init__(self, start=0, items=8, **kwargs):
super(SliceLayer, self).__init__(**kwargs)
self.start = start
self.items = items
self.trainable = False

def build(self, input_shape):
super(SliceLayer, self).build(input_shape)

    def call(self, x):
        # Keep channels [start, start + items) of a channels-last tensor.
        return x[:, :, :, self.start: self.start + self.items]

def compute_output_shape(self, input_shape):
height, width, in_channels = input_shape[1:]
return input_shape[0], height, width, self.items

def get_config(self):
config = {'start': self.start, 'items': self.items}
base_config = super(SliceLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))


class ChannelShuffle(Layer):
    """Shuffles channels between groups (the ShuffleNet-style channel-shuffle
    operation) on a channels-last input tensor."""
def __init__(self, groups=None, groups_factor=8, **kwargs):
super(ChannelShuffle, self).__init__(**kwargs)
self.groups = groups
self.groups_factor = groups_factor
self.trainable = False

def build(self, input_shape):
super(ChannelShuffle, self).build(input_shape)

def call(self, x):
height, width, in_channels = x.shape.as_list()[1:]

        if self.groups is None:
            if in_channels % self.groups_factor:
                raise ValueError(
                    "Number of input channels (%d) is not divisible by "
                    "groups_factor (%d)" % (in_channels, self.groups_factor))

            self.groups = in_channels // self.groups_factor

channels_per_group = in_channels // self.groups

        # View the channels as a (groups, channels_per_group) grid, transpose
        # it, and flatten back so channels from different groups interleave.
        x = K.reshape(x, [-1, height, width, self.groups, channels_per_group])
        x = K.permute_dimensions(x, (0, 1, 2, 4, 3))
        x = K.reshape(x, [-1, height, width, in_channels])

return x

def compute_output_shape(self, input_shape):
return input_shape

def get_config(self):
config = {'groups': self.groups, 'groups_factor': self.groups_factor}
base_config = super(ChannelShuffle, self).get_config()
return dict(list(base_config.items()) + list(config.items()))


class PadZeros(Layer):
    """Appends `diff` all-zero channels to a channels-last input tensor."""
def __init__(self, diff, **kwargs):
super(PadZeros, self).__init__(**kwargs)
self.diff = diff
self.trainable = False

    def build(self, input_shape):
        # The layer has no trainable weights, so there is nothing to create here.
        super(PadZeros, self).build(input_shape)

    def call(self, x):
        # Pad only the channel axis with `diff` trailing zeros.
        return tf.pad(x, ((0, 0), (0, 0), (0, 0), (0, self.diff)), mode='CONSTANT')

def compute_output_shape(self, input_shape):
batch, b_width, b_height, b_channels = input_shape
return batch, b_width, b_height, b_channels + self.diff

def get_config(self):
config = {'diff': self.diff}
base_config = super(PadZeros, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
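
For reference, a minimal sketch of how the three layers compose in a channels-last graph; the shapes, layer arguments, and import path below are illustrative assumptions, not part of this PR:

import numpy as np
from keras.layers import Input
from keras.models import Model

from EmoPy.src.customLayer import SliceLayer, ChannelShuffle, PadZeros

# Toy graph: keep the first 3 of 6 channels, zero-pad back to 6,
# then shuffle the 6 channels as 2 groups of 3.
inputs = Input(shape=(1, 1, 6))
x = SliceLayer(start=0, items=3)(inputs)   # -> (None, 1, 1, 3)
x = PadZeros(diff=3)(x)                    # -> (None, 1, 1, 6)
x = ChannelShuffle(groups_factor=3)(x)     # groups = 6 // 3 = 2
probe = Model(inputs, x)

# Channels [0, 1, 2] survive the slice, zeros are appended, and the shuffle
# interleaves the two groups: expected output [0, 0, 1, 0, 2, 0].
print(probe.predict(np.arange(6, dtype='float32').reshape(1, 1, 1, 6)))
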
62 changes: 60 additions & 2 deletions EmoPy/src/neuralnets.py
@@ -1,20 +1,26 @@
import keras
from keras.applications.inception_v3 import InceptionV3
from keras.applications.xception import Xception
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.engine import Layer
from keras.engine.saving import load_model
from keras.layers import Dense, Flatten, GlobalAveragePooling2D, Conv2D, ConvLSTM2D, Conv3D, MaxPooling2D, Dropout, \
    MaxPooling3D
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.losses import categorical_crossentropy
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras.optimizers import Adam, Adamax
from keras.utils import plot_model
import json

from EmoPy.src.callback import PlotLosses
from keras_preprocessing.image import ImageDataGenerator

from EmoPy.src.customLayer import SliceLayer, ChannelShuffle, PadZeros


class _FERNeuralNet(object):
@@ -419,3 +425,55 @@ def fit(self, image_data, labels, validation_split, epochs=50):
self.model.compile(optimizer="RMSProp", loss="cosine_proximity", metrics=["accuracy"])
self.model.fit(image_data, labels, epochs=epochs, validation_split=validation_split,
callbacks=[ReduceLROnPlateau(), EarlyStopping(patience=3)])

class CGP_CNN(_FERNeuralNet):
"""
    A convolutional neural network whose architecture was discovered automatically with a Cartesian Genetic Programming (CGP) library:
https://github.com/scheckmedia/cgp-cnn-design

    :param emotion_map: dict mapping each emotion label to the index of that emotion's probability in the prediction output array
    :param verbose: if True, prints extra process information
"""

    def __init__(self, emotion_map, verbose=False):
self.verbose = verbose
super().__init__(emotion_map)

def _init_model(self):
"""
Loads the network from the h5 file.
"""
        # The path is resolved relative to the current working directory,
        # so run from the repository root.
        model = load_model("EmoPy/models/gcp-cnn.h5",
                           custom_objects={'SliceLayer': SliceLayer,
                                           'ChannelShuffle': ChannelShuffle,
                                           'PadZeros': PadZeros})
if self.verbose:
model.summary()
self.model = model

def fit(self, x_train, y_train):
"""
Trains the neural net on the data provided.

        :param x_train: training images, shape (n_samples, 48, 48, 1)
        :param y_train: one-hot emotion labels, shape (n_samples, 7)
"""
datagen = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=1,
horizontal_flip=True)
        # No validation data is passed to fit_generator below, so monitor the
        # training accuracy ('acc'); 'val_acc' would never be reported and the
        # learning-rate schedule would never trigger. ReduceLROnPlateau is
        # already imported at the top of this file, and Keras attaches the
        # callback to the model itself during fitting.
        callback_LR = ReduceLROnPlateau(
            monitor="acc",
            factor=0.75,
            patience=10,
            verbose=1,
            mode='auto',
            min_delta=0.00001,
            min_lr=0.0000001)
        callbacks = [callback_LR]
self.model.compile(loss='categorical_crossentropy',
optimizer=Adamax(lr=0.001),
metrics=['accuracy'])
        self.model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                                 steps_per_epoch=(x_train.shape[0] // 32) + 1,
                                 epochs=200, verbose=1 if self.verbose else 0,
                                 callbacks=callbacks)
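
A minimal end-to-end training sketch; the emotion labels and arrays below are illustrative placeholders, and it assumes the script is run from the repository root so that "EmoPy/models/gcp-cnn.h5" resolves:

import numpy as np

from EmoPy.src.neuralnets import CGP_CNN

# Hypothetical 7-class emotion map; the values index the model's output vector.
emotion_map = {'anger': 0, 'disgust': 1, 'fear': 2, 'happiness': 3,
               'sadness': 4, 'surprise': 5, 'calm': 6}

net = CGP_CNN(emotion_map, verbose=True)

x_train = np.random.rand(64, 48, 48, 1).astype('float32')  # stand-in images
y_train = np.eye(7)[np.random.randint(0, 7, size=64)]      # stand-in one-hot labels
net.fit(x_train, y_train)  # augments with ImageDataGenerator, trains for 200 epochs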