"""
This code is an implementation of a deep learning model for face expression recognition using the Keras
framework. The goal of the model is to classify facial expressions into seven categories:
angry, disgust, fear, happy, neutral, sad, and surprise.
The code first imports the necessary libraries including Keras, matplotlib, and os.
It then displays a sample of images of the expression "disgust" from the dataset to visualize the data.
The training and validation data are created using Keras' ImageDataGenerator which reads images from the
given directory and returns batches of images and labels. The train_set and test_set are initialized with
the parameters such as target size, batch size, and color mode.
The model is built using a sequential model in Keras, with a series of Convolutional Neural Network (CNN)
layers followed by fully connected layers. The first four layers are CNN layers, each followed by batch
normalization, activation, max pooling, and dropout. The output of the last CNN layer is then flattened and
fed into two fully connected layers, each with a batch normalization, activation, dropout, and dense layer.
The last dense layer has a softmax activation function which returns the probabilities for each class.
The model is then compiled using the Adam optimizer, categorical cross-entropy as the loss function,
and accuracy as the metric to evaluate the performance.
The model is fitted using the fit_generator() function in Keras. The function takes the training and validation
data, the number of epochs, and a list of callback functions that monitor the training process, and then
updates the weights in the model accordingly. In this code, three callback functions are used: ModelCheckpoint,
EarlyStopping, and ReduceLROnPlateau.
Finally, the code visualizes the loss and accuracy of the model during the training process using the
matplotlib library. The history object returned by the fit_generator() function is used to plot the
loss and accuracy curves for both training and validation data.
"""
import os

import matplotlib.pyplot as plt

# Importing deep learning libraries
from keras.preprocessing.image import load_img, img_to_array
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D
from keras.models import Sequential
# Displaying sample images
picture_size = 48
folder_path = "../input/face-expression-recognition-dataset/images/"
expression = 'disgust'

plt.figure(figsize=(12, 12))
for i in range(1, 10):
    plt.subplot(3, 3, i)
    img = load_img(folder_path + "train/" + expression + "/" +
                   os.listdir(folder_path + "train/" + expression)[i],
                   target_size=(picture_size, picture_size))
    plt.imshow(img)
plt.show()
# Making training and validation data
batch_size = 128

datagen_train = ImageDataGenerator()
datagen_val = ImageDataGenerator()

train_set = datagen_train.flow_from_directory(folder_path + "train",
                                              target_size=(picture_size, picture_size),
                                              color_mode="grayscale",
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              shuffle=True)

test_set = datagen_val.flow_from_directory(folder_path + "validation",
                                           target_size=(picture_size, picture_size),
                                           color_mode="grayscale",
                                           batch_size=batch_size,
                                           class_mode='categorical',
                                           shuffle=False)
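
# Optional sanity check: flow_from_directory assigns label indices to the
# subdirectory names in alphabetical order. Printing class_indices confirms
# which index maps to which expression (assuming the folders are named after
# the seven expressions, e.g. {'angry': 0, 'disgust': 1, ...}).
print(train_set.class_indices)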
# Model building
from keras.optimizers import Adam

no_of_classes = 7

model = Sequential()

# 1st CNN block
model.add(Conv2D(64, (3, 3), padding='same', input_shape=(48, 48, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 2nd CNN block
model.add(Conv2D(128, (5, 5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 3rd CNN block
model.add(Conv2D(512, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 4th CNN block
model.add(Conv2D(512, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
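
# Shape check: with 'same' padding and four 2x2 max-poolings, the 48x48 input
# shrinks 48 -> 24 -> 12 -> 6 -> 3, so the Flatten below yields 3 * 3 * 512 = 4608 features.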
model.add(Flatten())

# 1st fully connected block
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))

# 2nd fully connected block
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))

# Output layer: one probability per expression class
model.add(Dense(no_of_classes, activation='softmax'))
# Note: this compile is superseded by the re-compilation before training below.
opt = Adam(learning_rate=0.0001)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Fitting the model with training and validation data
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
checkpoint = ModelCheckpoint("./model.h5", monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=3,
                               verbose=1,
                               restore_best_weights=True)

reduce_learningrate = ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.2,
                                        patience=3,
                                        verbose=1,
                                        min_delta=0.0001)

callbacks_list = [early_stopping, checkpoint, reduce_learningrate]
epochs = 48

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(learning_rate=0.001),
              metrics=['accuracy'])

history = model.fit(train_set,
                    steps_per_epoch=train_set.n // train_set.batch_size,
                    epochs=epochs,
                    validation_data=test_set,
                    validation_steps=test_set.n // test_set.batch_size,
                    callbacks=callbacks_list)
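
# Optional check (a minimal sketch, not part of the original pipeline): evaluate the
# trained model on the validation generator for a single loss/accuracy figure to
# complement the curves plotted below.
val_loss, val_accuracy = model.evaluate(test_set, steps=test_set.n // test_set.batch_size)
print(f"Validation loss: {val_loss:.4f} - validation accuracy: {val_accuracy:.4f}")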
# Plotting accuracy & loss
plt.style.use('dark_background')

plt.figure(figsize=(20, 10))
plt.suptitle('Optimizer : Adam', fontsize=10)

plt.subplot(1, 2, 1)
plt.ylabel('Loss', fontsize=16)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.legend(loc='upper right')

plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.legend(loc='lower right')

plt.show()
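
# Inference sketch (illustrative, with assumptions flagged inline): reload the best
# checkpoint saved by ModelCheckpoint and classify a single image. The sample path
# below is hypothetical; any face image from the dataset works.
import numpy as np
from keras.models import load_model

best_model = load_model("./model.h5")

# Hypothetical sample: first validation image of the 'happy' class
sample_path = folder_path + "validation/happy/" + os.listdir(folder_path + "validation/happy")[0]
img = load_img(sample_path, color_mode="grayscale", target_size=(picture_size, picture_size))

# Match the training input: grayscale 48x48 with a batch dimension
x = np.expand_dims(img_to_array(img), axis=0)  # shape (1, 48, 48, 1)

probs = best_model.predict(x)[0]
labels = sorted(train_set.class_indices, key=train_set.class_indices.get)
print("Predicted expression:", labels[int(np.argmax(probs))])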