diff --git a/Api/__init__.py b/Api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/Api/facerecog.py b/Api/facerecog.py
index 73f3583..ac8cdb3 100644
--- a/Api/facerecog.py
+++ b/Api/facerecog.py
@@ -3,8 +3,7 @@
 import PIL.Image
 import dlib
 import numpy as np
-
-import models as dat_models
+import Api.models as dat_models
 
 face_detector = dlib.get_frontal_face_detector()
diff --git a/Api/models/__init__.py b/Api/models/__init__.py
index 48f8a0a..4b8daab 100644
--- a/Api/models/__init__.py
+++ b/Api/models/__init__.py
@@ -1,15 +1,22 @@
 # -*- coding: utf-8 -*-
-
 from pkg_resources import resource_filename
 
+
 def pose_predictor_model_location():
-    return resource_filename(__name__, "models/shape_predictor_68_face_landmarks.dat")
+    return resource_filename(__name__,
+                             "shape_predictor_68_face_landmarks.dat")
+
 
 def pose_predictor_five_point_model_location():
-    return resource_filename(__name__, "models/shape_predictor_5_face_landmarks.dat")
+    return resource_filename(__name__,
+                             "shape_predictor_5_face_landmarks.dat")
+
 
 def face_recognition_model_location():
-    return resource_filename(__name__, "models/dlib_face_recognition_resnet_model_v1.dat")
+    return resource_filename(__name__,
+                             "dlib_face_recognition_resnet_model_v1.dat")
+
 
 def cnn_face_detector_model_location():
-    return resource_filename(__name__, "models/mmod_human_face_detector.dat")
+    return resource_filename(__name__,
+                             "mmod_human_face_detector.dat")
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/detectionvisage/detect.py b/detectionvisage/detect.py
index ffbe040..643abc1 100644
--- a/detectionvisage/detect.py
+++ b/detectionvisage/detect.py
@@ -1,20 +1,31 @@
+import os
+import sys
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import cv2
-import face_recognition
+import takeref
+from Api import facerecog
 
+# Take the reference picture used for comparison throughout the exam.
+takeref.takepicture()
 video_capture = cv2.VideoCapture(0)
 
 # Load a sample picture and learn how to recognize it.
-pierre_image = face_recognition.load_image_file("faces/pierre.png")
-maxime_image = face_recognition.load_image_file("faces/maxime.png")
-test_image = face_recognition.load_image_file("faces/test.png")
-pierre_face_encoding = face_recognition.face_encodings(pierre_image)[0]
-maxime_face_encoding = face_recognition.face_encodings(maxime_image)[0]
-test_face_encoding = face_recognition.face_encodings(test_image)[0]
+# #################################################################
+# TODO: fetch the image from the server instead of the local one #
+# #################################################################
+test_image = facerecog.load_image_file("faces/test.png")
+
+test_face_encoding = facerecog.face_encodings(test_image)[0]
 
 # Create arrays of known face encodings and their names
-known_face_encodings = [pierre_face_encoding, maxime_face_encoding]
-known_face_names = ["pierre", "maxime", "test"]
+known_face_encodings = [
+    test_face_encoding
+]
+
+known_face_names = [
+    "test"
+]
 
 # Initialize some variables
 face_locations = []
 face_encodings = []
@@ -23,26 +34,24 @@
 while True:
     # Grab a single frame of video
-    ret, frame = video_capture.read()
-
+    _, frame = video_capture.read()
+    frame = cv2.flip(frame, 1)
     # Resize frame of video to 1/4 size for faster face recognition processing
     small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
 
     # Convert the image from BGR color (which OpenCV uses) to RGB color
-    # (which face_recognition uses)
+    # (which facerecog uses)
     rgb_small_frame = small_frame[:, :, ::-1]
 
-    # Only process every other frame of video to save time
-
     # Find all the faces and face encodings in the current frame of video
-    face_locations = face_recognition.face_locations(rgb_small_frame)
-    face_encodings = face_recognition.face_encodings(
+    face_locations = facerecog.face_locations(rgb_small_frame)
+    face_encodings = facerecog.face_encodings(
         rgb_small_frame, face_locations)
 
     face_names = []
     for face_encoding in face_encodings:
         # See if the face is a match for the known face(s)
-        matches = face_recognition.compare_faces(
+        matches = facerecog.compare_faces(
             known_face_encodings, face_encoding)
         name = "Unknown"
 
         # If a match was found in known_face_encodings,
@@ -54,7 +63,6 @@
         face_names.append(name)
 
-
     # Display the results
     for (top, right, bottom, left), name in zip(face_locations, face_names):
         # Scale back up face locations since the frame we detected in was
         # scaled to 1/4 size
diff --git a/detectionvisage/faces/etudiant.png b/detectionvisage/faces/etudiant.png
new file mode 100644
index 0000000..559fca9
Binary files /dev/null and b/detectionvisage/faces/etudiant.png differ
diff --git a/detectionvisage/faces/test.png b/detectionvisage/faces/test.png
index b3faece..14acbe0 100644
Binary files a/detectionvisage/faces/test.png and b/detectionvisage/faces/test.png differ
diff --git a/detectionvisage/plop.py b/detectionvisage/plop.py
index cd5bd04..96336b6 100644
--- a/detectionvisage/plop.py
+++ b/detectionvisage/plop.py
@@ -1,44 +1,121 @@
-import dlib
+import os
+import sys
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import cv2
+import takeref
+from Api import facerecog
 
+# Take the reference picture used for comparison throughout the exam.
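+# NOTE: takeref writes faces/etudiant.png relative to the current
+# working directory, so run this script from detectionvisage/.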
+takeref.takepicture()
+
 
-detector = dlib.get_frontal_face_detector()
-cap = cv2.VideoCapture(0)
-cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
-cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
-# cap.set(cv2.CAP_PROP_FPS, 10)
-
-color_green = (0, 255, 0)
-color_red = (0, 0, 255)
-color_white = (255, 255, 255)
-line_width = 3
-font = cv2.FONT_HERSHEY_SIMPLEX
-bottomLeftText = (10, 50)
-fontScale = 1
-mire = [200, 130, 450, 400]
-photo = True
-while photo:
-    _, img = cap.read()
-    pic = img
-    cv2.rectangle(img, (mire[0], mire[1]),
-                  (mire[2], mire[3]), color_red, line_width * 2)
-
-    cv2.putText(img, 'Alignez votre tete au carre rouge', bottomLeftText,
-                font, fontScale, color_white, line_width)
-
-    dets = detector(img)
-    for det in dets:
-        pos = [det.left(), det.top(), det.right(), det.bottom()]
-        cv2.rectangle(img, (det.left(), det.top()),
-                      (det.right(), det.bottom()), color_green, line_width)
-
-        print(pos[0], mire[0], pos[1], mire[1],
-              pos[2], mire[2], pos[3], mire[3])
-        if (pos[0] > mire[0] & pos[1] > mire[1]
-                & pos[2] < mire[2] & pos[3] < mire[3]):
-            cv2.imwrite("test.png", pic)
-            photo = False
-
-    cv2.imshow('my webcam', img)
-    key = cv2.waitKey(100)
-
+video_capture = cv2.VideoCapture(0)
+# Load the student's reference picture and learn how to recognize it.
+
+# #################################################################
+# TODO: fetch the image from the server instead of the local one #
+# #################################################################
+etudiant_image = facerecog.load_image_file("faces/etudiant.png")
+
+etudiant_face_encoding = facerecog.face_encodings(etudiant_image)[0]
+
+# Create arrays of known face encodings and their names
+known_face_encodings = [
+    etudiant_face_encoding
+]
+
+known_face_names = [
+    "etudiant"
+]
+
+# Initialize some variables
+face_locations = []
+face_encodings = []
+face_names = []
+
+warning_disappear = 0
+warning_unknown = 0
+while True:
+    # Grab a single frame of video
+    _, frame = video_capture.read()
+    frame = cv2.flip(frame, 1)
+    # Resize frame of video to 1/4 size for faster face recognition processing
+    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
+
+    # Convert the image from BGR color (which OpenCV uses) to RGB color
+    # (which facerecog uses)
+    rgb_small_frame = small_frame[:, :, ::-1]
+
+    # Find all the faces and face encodings in the current frame of video
+    face_locations = facerecog.face_locations(rgb_small_frame)
+    face_encodings = facerecog.face_encodings(
+        rgb_small_frame, face_locations)
+
+    # if not face_locations:
+    #     warning_disappear += 1
+
+    face_names = []
+    for face_encoding in face_encodings:
+        # See if the face is a match for the known face(s)
+        matches = facerecog.compare_faces(
+            known_face_encodings, face_encoding)
+        name = "Unknown"
+        # If a match was found in known_face_encodings,
+        # just use the first one.
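+        # compare_faces thresholds the distance between encodings
+        # (0.6 by default in the upstream face_recognition project
+        # that facerecog appears to be adapted from).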
+        if True in matches:
+            first_match_index = matches.index(True)
+            name = known_face_names[first_match_index]
+
+        face_names.append(name)
+
+    print(face_names)
+    if "etudiant" not in face_names:
+        warning_disappear += 1
+    else:
+        print("reset disappear")
+        warning_disappear = 0
+
+    if "Unknown" in face_names:
+        warning_unknown += 1
+
+    if face_names == ["etudiant"]:
+        print("reset unknown")
+        warning_unknown = 0
+
+    if warning_disappear > 50:  # gone for ~5 s at ~10 FPS
+        print("Where did the student go?")
+    if warning_unknown > 50:  # unknown face for ~5 s at ~10 FPS
+        print("Unrecognized person in front of the webcam")
+    # Display the results
+    for (top, right, bottom, left), name in zip(face_locations, face_names):
+        # Scale back up face locations since the frame we detected in was
+        # scaled to 1/4 size
+        top *= 4
+        right *= 4
+        bottom *= 4
+        left *= 4
+
+        # Draw a box around the face
+        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
+
+        # Draw a label with a name below the face
+        cv2.rectangle(frame, (left, bottom - 35),
+                      (right, bottom), (0, 0, 255), cv2.FILLED)
+        font = cv2.FONT_HERSHEY_DUPLEX
+        cv2.putText(frame, name, (left + 6, bottom - 6),
+                    font, 1.0, (255, 255, 255), 1)
+
+    # Display the resulting image
+    cv2.imshow('Video', frame)
+    # Hit 'q' or Esc to quit; waitKey(100) also throttles the loop
+    key = cv2.waitKey(100) & 0xFF
+    if key == ord('q') or key == 27:
+        break
+
+# Release handle to the webcam
+video_capture.release()
 cv2.destroyAllWindows()
diff --git a/detectionvisage/plop2.py b/detectionvisage/plop2.py
deleted file mode 100644
index 6756669..0000000
--- a/detectionvisage/plop2.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import dlib
-import cv2
-
-
-cap = cv2.VideoCapture(0)
-cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
-cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
-# cap.set(cv2.CAP_PROP_FPS, 10)
-
-face_cascade = cv2.CascadeClassifier(
-    '/home/pedrok/Programmes/anaconda3/envs/Master/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
-eye_cascade = cv2.CascadeClassifier(
-    '/home/pedrok/Programmes/anaconda3/envs/Master/share/OpenCV/haarcascades/haarcascade_eye.xml')
-
-color_green = (0, 255, 0)
-color_red = (0, 0, 255)
-color_white = (255, 255, 255)
-line_width = 3
-font = cv2.FONT_HERSHEY_SIMPLEX
-bottomLeftText = (10, 50)
-fontScale = 1
-mire = [200, 130, 450, 400]
-photo = True
-while photo:
-    _, img = cap.read()
-    pic = img
-    cv2.rectangle(img, (mire[0], mire[1]),
-                  (mire[2], mire[3]), color_red, line_width * 2)
-
-    cv2.putText(img, 'Alignez votre tete au carre rouge', bottomLeftText,
-                font, fontScale, color_white, line_width)
-
-    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
-    for (x, y, w, h) in faces:
-        pos = [x, y, x + w, y + h]
-        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
-        roi_gray = gray[y:y + h, x:x + w]
-        roi_color = img[y:y + h, x:x + w]
-        eyes = eye_cascade.detectMultiScale(roi_gray)
-        for nb_eyes, (ex, ey, ew, eh) in enumerate(eyes):
-            cv2.rectangle(roi_color, (ex, ey),
-                          (ex + ew, ey + eh), (0, 255, 0), 2)
-
-        if (pos[0] > mire[0] and pos[1] > mire[1] and pos[2] < mire[2]
-                and pos[3] < mire[3] and nb_eyes == 1):
-
-            cv2.imwrite("test.png", pic)
-            photo = False
-    cv2.imshow('my webcam', img)
-    key = cv2.waitKey(100)
-
-cv2.destroyAllWindows()
diff --git a/detectionvisage/prendphoto.py b/detectionvisage/prendphoto.py
deleted file mode 100644
index 7a2ea76..0000000
--- a/detectionvisage/prendphoto.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import cv2
-
-cap = cv2.VideoCapture(0)
-cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
-cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
-# cap.set(cv2.CAP_PROP_FPS, 10)
-
-face_cascade = cv2.CascadeClassifier(
-    'files/haarcascade_frontalface_default.xml')
-eye_cascade = cv2.CascadeClassifier(
-    'files/haarcascade_eye.xml')
-
-color_green = (0, 255, 0)
-color_red = (0, 0, 255)
-color_white = (255, 255, 255)
-line_width = 3
-font = cv2.FONT_HERSHEY_SIMPLEX
-bottomLeftText = (10, 50)
-fontScale = 1
-mire = [200, 130, 450, 400]
-photo = True
-
-while photo:
-    _, img = cap.read()
-    _, pic = cap.read()
-    cv2.rectangle(img, (mire[0], mire[1]),
-                  (mire[2], mire[3]), color_red, line_width * 2)
-
-    cv2.putText(img, 'Alignez votre tete au carre rouge', bottomLeftText,
-                font, fontScale, color_white, line_width)
-
-    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
-    for (x, y, w, h) in faces:
-        pos = [x, y, x + w, y + h]
-        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
-        roi_gray = gray[y:y + h, x:x + w]
-        roi_color = img[y:y + h, x:x + w]
-        eyes = eye_cascade.detectMultiScale(roi_gray)
-        for nb_eyes, (ex, ey, ew, eh) in enumerate(eyes):
-            cv2.rectangle(roi_color, (ex, ey),
-                          (ex + ew, ey + eh), (0, 255, 0), 2)
-
-        if (pos[0] > mire[0] and pos[1] > mire[1] and pos[2] < mire[2]
-                and pos[3] < mire[3] and nb_eyes == 1):
-
-            cv2.imwrite("faces/test.png", pic)
-            photo = False
-    cv2.imshow('Photo reference', img)
-    key = cv2.waitKey(100)
-
-cv2.destroyAllWindows()
diff --git a/detectionvisage/takeref.py b/detectionvisage/takeref.py
new file mode 100644
index 0000000..5699a11
--- /dev/null
+++ b/detectionvisage/takeref.py
@@ -0,0 +1,86 @@
+import cv2
+
+
+def takepicture():
+    """
+    Take the student's reference picture with the webcam.
+
+    Returns
+    -------
+    Nothing; writes the reference image to faces/etudiant.png.
+
+    Arguments
+    ---------
+    None
+
+    Raises
+    ------
+    Exceptions raised by the underlying OpenCV calls.
+
+    Examples
+    --------
+    >>> takepicture()
+
+    """
+
+    # get the webcam
+    cap = cv2.VideoCapture(0)
+    # Set the dimensions of the capture
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
+
+    # Load the Haar cascades for the face and eye detectors
+    face_cascade = cv2.CascadeClassifier(
+        '../Api/models/haarcascade_frontalface_default.xml')
+    eye_cascade = cv2.CascadeClassifier(
+        '../Api/models/haarcascade_eye.xml')
+
+    target = [200, 130, 450, 400]
+
+    phototaken = False
+    while not phototaken:
+        _, img = cap.read()
+        img = cv2.flip(img, 1)
+        pic = img.copy()
+        cv2.rectangle(img, (target[0], target[1]),
+                      (target[2], target[3]), (0, 0, 255), 6)
+
+        cv2.putText(img, 'Alignez votre tete au carre rouge', (10, 50),
+                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
+
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+        for (x, y, w, h) in faces:
+            pos = [x, y, x + w, y + h]
+            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
+            roi_gray = gray[y:y + h, x:x + w]
+            roi_color = img[y:y + h, x:x + w]
+            eyes = eye_cascade.detectMultiScale(roi_gray)
+            for (ex, ey, ew, eh) in eyes:
+                cv2.rectangle(roi_color, (ex, ey),
+                              (ex + ew, ey + eh), (0, 255, 0), 2)
+
+            # If the head is inside the target and both eyes are visible
+            if (pos[0] > target[0]
+                    and pos[1] > target[1]
+                    and pos[2] < target[2]
+                    and pos[3] < target[3]
+                    and len(eyes) == 2):
+
+                cv2.imwrite("faces/etudiant.png", pic)
+                phototaken = True
+
+        cv2.imshow('Photo reference', img)
+        # Process fewer frames (~10 FPS)
+        cv2.waitKey(100)
+
+    # Release the webcam so the recognition scripts can reopen it
+    cap.release()
+    cv2.destroyAllWindows()
+    return
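+
+
+if __name__ == "__main__":
+    # Manual check (a convenience sketch, not part of the exam flow,
+    # which calls takepicture() from detect.py / plop.py):
+    takepicture()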