diff --git a/build_csv.py b/build_csv.py index ac3b10e..dc2df26 100644 --- a/build_csv.py +++ b/build_csv.py @@ -12,14 +12,15 @@ if __name__== "__main__": # output csv file where we save paths to out images write_file= open("data.csv", "w") - data_dir= "/home/suryateja/Documents/GitHub/FaceRecognizer/output_images" + data_dir= "C:/Users/jaraa/OneDrive/Documentos/GitHub/FaceRecognizer/sorted_output_images" # folders for each subject in root images folder subjects= listdir(data_dir) - #print(subjects) + print(subjects) for i in range(len(subjects)): - # all pictures in a particular folder/subject + # all pictures in a particular folder/subject + print(subjects[i]) photos= listdir(data_dir+ "/"+ subjects[i]) for photo in photos: out_line= data_dir+ "/"+ subjects[i]+ "/"+ photo+ ";"+ str(i) diff --git a/detect_save_images.py b/detect_save_images.py index 759cf37..cc85a9a 100644 --- a/detect_save_images.py +++ b/detect_save_images.py @@ -7,11 +7,14 @@ If face(s) is(are) found, it crops and saves them at "output_path". 
''' -import cv2 -import cv2.cv as cv + +import cv2 as cv from os import listdir import time + + + def cropImage(img, box): [p, q, r, s]= box # crop and save the image provided with the co-ordinates of bounding box @@ -20,28 +23,28 @@ def cropImage(img, box): # save the cropped image at specified location def saveCropped(img, name): - cv2.imwrite(output_path+ name+ ".jpg", img) + cv.imwrite(output_path+ name+ ".jpg", img) if __name__== "__main__": # paths to input and output images - input_path= "old/input_images/" - output_path= "old/output_images/" + input_path= "input_images/" + output_path= "output_images/" # load pre-trained frontalface cascade classifier - frontal_face= cv2.CascadeClassifier("haarcascade_frontalface_default.xml") - input_names= listdir("/home/suryateja/Documents/GitHub/FaceRecognizer/"+ input_path) + frontal_face= cv.CascadeClassifier("haarcascade_frontalface_default.xml") + input_names= listdir("C:/Users/jaraa/OneDrive/Documentos/GitHub/FaceRecognizer/"+ input_path) print("Starting to detect faces in images and save the cropped images to output file...") sttime= time.clock() i= 1 for name in input_names: print(input_path+name) - color_img= cv2.imread(input_path+ name) + color_img= cv.imread(input_path+ name) # converting color image to grayscale image - gray_img= cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY) + gray_img= cv.cvtColor(color_img, cv.COLOR_BGR2GRAY) # find the bounding boxes around detected faces in images - bBoxes= frontal_face.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE) + bBoxes= frontal_face.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30), flags = cv.CASCADE_SCALE_IMAGE) #print(bBoxes) for box in bBoxes: diff --git a/face_detect_recognition.py b/face_detect_recognition.py index 2483a25..724af5c 100644 --- a/face_detect_recognition.py +++ b/face_detect_recognition.py @@ -18,8 +18,7 @@ Note: Images in here can be full sized images. 
(no need to crop faces) ''' -import cv2 -import cv2.cv as cv +import cv2 import numpy as np from os import listdir import sys, time @@ -67,7 +66,7 @@ def get_images(path, size): # initializing eigen_model and training print("Initializing eigen FaceRecognizer and training...") sttime= time.clock() - eigen_model= cv2.createEigenFaceRecognizer() + eigen_model= cv2.face.EigenFaceRecognizer_create() eigen_model.train(images, labels) print("\tSuccessfully completed training in "+ str(time.clock()- sttime)+ " Secs!") @@ -90,7 +89,7 @@ def get_images(path, size): #starting to detect face in given image frontal_face= cv2.CascadeClassifier("haarcascade_frontalface_default.xml") #bBoxes= frontal_face.detectMultiScale(pre_image, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE) - bBoxes= frontal_face.detectMultiScale(pre_image, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE) + bBoxes= frontal_face.detectMultiScale(pre_image, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv2.CASCADE_SCALE_IMAGE) for bBox in bBoxes: (p,q,r,s)= bBox