Demonstrate emotion recognition using the Fisher Faces recognition model (how to train and use it) and the opencv_contrib project.
- Load `emotion_detection_model.xml` using `createFileFromUrl()` from `utils.js`:
utils.createFileFromUrl('emotion_detection_model.xml', emotionModelUrl, callback);
- Initialize the array of emotions that the model is able to recognize:
const emotions = ['neutral', 'anger', 'disgust', 'fear', 'happiness', 'sadness', 'surprise'];
- Load emoticons
Create `img` elements and set their `src` attributes:
emotions.forEach(emotion => {
let emoticonImg = createImgNode(emotion + '-emoticon');
emoticonImg.onload = function () {
++nImagesLoaded;
};
emoticonImg.src = '../../data/emoticons/' + emotion + '.png';
});
Extract the masks from `rgbaVector` to draw a transparent background, and push `{'name': .., 'src': .., 'mask': ..}` structures to the `emoticons` array once the images are loaded:
// Poll until every emoticon image has finished loading, then build the
// global `emoticons` array (one {name, src, mask} entry per emotion) and
// start the video-processing loop.
//
// Reads the globals: nImagesLoaded, N_IMAGES, emotions, emoticons,
// processVideo. Uses cv (OpenCV.js) and the DOM.
function waitForResources() {
    if (nImagesLoaded === N_IMAGES) {
        emotions.forEach(emotion => {
            const emoticonImg = document.getElementById(emotion + '-emoticon');
            const rgbaVector = new cv.MatVector();
            const emoticon = {};
            emoticon.src = cv.imread(emoticonImg);
            cv.split(emoticon.src, rgbaVector); // Create mask from alpha channel.
            emoticon.mask = rgbaVector.get(3);
            emoticon.name = emotion;
            emoticons.push(emoticon);
            rgbaVector.delete(); // OpenCV.js Mats are wasm-allocated; free the temporary vector.
        });
        requestAnimationFrame(processVideo);
        return;
    }
    // Not all images are ready yet — re-check in 50 ms.
    setTimeout(waitForResources, 50);
}
waitForResources();
- Create a `FisherFaceRecognizer` and read the model:
fisherFaceRecognizer = new cv.face_FisherFaceRecognizer();
fisherFaceRecognizer.read('emotion_detection_model.xml');
- Detect a face using the Haar Cascade model (see the face detection README file):
faceCascade.detectMultiScale(gray, faceVec);
- Recognize the emotion from a face
Prepare the face:
let face = faceVec.get(i);
let faceGray = gray.roi(face);
cv.resize(faceGray, faceGray, new cv.Size(350, 350));
Predict the emotion:
let prediction = fisherFaceRecognizer.predict_label(faceGray);
let emoticon = emoticons[prediction];
- Draw the emoticon over the face
Resize image source and mask:
let newEmoticonSize = new cv.Size(face.width, face.height);
let resizedEmoticon = new cv.Mat();
let resizedMask = new cv.Mat();
cv.resize(emoticon.src, resizedEmoticon, newEmoticonSize);
cv.resize(emoticon.mask, resizedMask, newEmoticonSize);
Copy the resized emoticon into the stream image using the face coordinates:
resizedEmoticon.copyTo(src.rowRange(face.y, face.y + face.height)
    .colRange(face.x, face.x + face.width), resizedMask);
- Add the following flags to `def get_cmake_cmd(self)` of `opencv/platforms/js/build_js.py`:
"-DBUILD_opencv_face=ON",
"-DOPENCV_EXTRA_MODULES_PATH='/home/path-to-opencv-contrib/opencv_contrib/modules'"
- Define the `face` module in `opencv/modules/js/src/embindgen.py`. Include all global functions, classes, and their methods that you want to have in your wasm. For example:
face = {'face_FaceRecognizer': ['train', 'update', 'predict_label', 'write', 'read', 'setLabelInfo', 'getLabelInfo', 'getLabelsByString', 'getThreshold', 'setThreshold'],
'face_BasicFaceRecognizer': ['getNumComponents', 'setNumComponents', 'getThreshold', 'setThreshold', 'getProjections', 'getLabels', 'getEigenValues', 'getEigenVectors', 'getMean', 'read', 'write'],
        'face_FisherFaceRecognizer': ['create']}
- Add the `face` module to the `makeWhiteList` call in `opencv/modules/js/src/embindgen.py`:
white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco, calib3d, face])
- Add the following in `opencv/modules/js/src/core_bindings.cpp`:
using namespace face;
- Append `js` to the `WRAP` list in `ocv_define_module` of `opencv_contrib/modules/face/CMakeLists.txt`:
ocv_define_module(face opencv_core
opencv_imgproc
opencv_objdetect
opencv_calib3d # estimateAffinePartial2D() (trainFacemark)
opencv_photo # seamlessClone() (face_swap sample)
WRAP python java js
)