Skip to content
This repository has been archived by the owner on Sep 15, 2022. It is now read-only.

Commit

Permalink
Detect leaving and wrong person
Browse files Browse the repository at this point in the history
  • Loading branch information
ierezell committed Sep 11, 2018
1 parent 91f2861 commit cd46083
Show file tree
Hide file tree
Showing 11 changed files with 228 additions and 170 deletions.
Empty file added Api/__init__.py
Empty file.
3 changes: 1 addition & 2 deletions Api/facerecog.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
import PIL.Image
import dlib
import numpy as np

import models as dat_models
import Api.models as dat_models

face_detector = dlib.get_frontal_face_detector()

Expand Down
18 changes: 13 additions & 5 deletions Api/models/__init__.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,23 @@
# -*- coding: utf-8 -*-

from pkg_resources import resource_filename


def pose_predictor_model_location():
    """Return the filesystem path of the bundled 68-point landmark model.

    Resolves ``shape_predictor_68_face_landmarks.dat`` relative to this
    package via ``pkg_resources``.
    """
    # Diff artifact fix: the pasted text kept both the old "models/"-prefixed
    # path and the new one; keep only the committed (prefix-free) path.
    return resource_filename(__name__,
                             "shape_predictor_68_face_landmarks.dat")


def pose_predictor_five_point_model_location():
    """Return the filesystem path of the bundled 5-point landmark model.

    Resolves ``shape_predictor_5_face_landmarks.dat`` relative to this
    package via ``pkg_resources``.
    """
    # Diff artifact fix: drop the stale duplicate return that still used the
    # old "models/" prefix; keep only the committed path.
    return resource_filename(__name__,
                             "shape_predictor_5_face_landmarks.dat")


def face_recognition_model_location():
    """Return the filesystem path of the bundled dlib face-recognition ResNet.

    Resolves ``dlib_face_recognition_resnet_model_v1.dat`` relative to this
    package via ``pkg_resources``.
    """
    # Diff artifact fix: drop the stale duplicate return that still used the
    # old "models/" prefix; keep only the committed path.
    return resource_filename(__name__,
                             "dlib_face_recognition_resnet_model_v1.dat")


def cnn_face_detector_model_location():
    """Return the filesystem path of the bundled CNN face-detector model.

    Resolves ``mmod_human_face_detector.dat`` relative to this package via
    ``pkg_resources``.
    """
    # Diff artifact fix: drop the stale duplicate return that still used the
    # old "models/" prefix; keep only the committed path.
    return resource_filename(__name__,
                             "mmod_human_face_detector.dat")
Empty file added __init__.py
Empty file.
44 changes: 26 additions & 18 deletions detectionvisage/detect.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,31 @@
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import cv2
import face_recognition
import takeref
from Api import facerecog
# Take the reference picture to compare all along the exam.
takeref.takepicture()

video_capture = cv2.VideoCapture(0)

# Load a sample picture and learn how to recognize it.
pierre_image = face_recognition.load_image_file("faces/pierre.png")
maxime_image = face_recognition.load_image_file("faces/maxime.png")
test_image = face_recognition.load_image_file("faces/test.png")

pierre_face_encoding = face_recognition.face_encodings(pierre_image)[0]
maxime_face_encoding = face_recognition.face_encodings(maxime_image)[0]
test_face_encoding = face_recognition.face_encodings(test_image)[0]
# #################################################################
# TO DO , FETCH une image du serveur au lieux de celles en local###
# #################################################################
test_image = facerecog.load_image_file("faces/test.png")

test_face_encoding = facerecog.face_encodings(test_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [pierre_face_encoding, maxime_face_encoding]
known_face_names = ["pierre", "maxime", "test"]
known_face_encodings = [
test_face_encoding
]

known_face_names = [
"test"
]

# Initialize some variables
face_locations = []
Expand All @@ -23,26 +34,24 @@

while True:
# Grab a single frame of video
ret, frame = video_capture.read()

_, frame = video_capture.read()
frame = cv2.flip(frame, 1)
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

# Convert the image from BGR color (which OpenCV uses) to RGB color
# (which face_recognition uses)
# (which facerecog uses)
rgb_small_frame = small_frame[:, :, ::-1]

# Only process every other frame of video to save time

# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(
face_locations = facerecog.face_locations(rgb_small_frame)
face_encodings = facerecog.face_encodings(
rgb_small_frame, face_locations)

face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(
matches = facerecog.compare_faces(
known_face_encodings, face_encoding)
name = "Unknown"

Expand All @@ -54,7 +63,6 @@

face_names.append(name)


# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was
Expand Down
Binary file added detectionvisage/faces/etudiant.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified detectionvisage/faces/test.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
152 changes: 112 additions & 40 deletions detectionvisage/plop.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,116 @@
import dlib
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import cv2
import takeref
from Api import facerecog
# Take the reference picture to compare all along the exam.
takeref.takepicture()

detector = dlib.get_frontal_face_detector()
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
# cap.set(cv2.CAP_PROP_FPS, 10)

color_green = (0, 255, 0)
color_red = (0, 0, 255)
color_white = (255, 255, 255)
line_width = 3
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftText = (10, 50)
fontScale = 1
mire = [200, 130, 450, 400]
photo = True
while photo:
_, img = cap.read()
pic = img
cv2.rectangle(img, (mire[0], mire[1]),
(mire[2], mire[3]), color_red, line_width * 2)

cv2.putText(img, 'Alignez votre tete au carre rouge', bottomLeftText,
font, fontScale, color_white, line_width)

dets = detector(img)
for det in dets:
pos = [det.left(), det.top(), det.right(), det.bottom()]
cv2.rectangle(img, (det.left(), det.top()),
(det.right(), det.bottom()), color_green, line_width)

print(pos[0], mire[0], pos[1], mire[1],
pos[2], mire[2], pos[3], mire[3])
if (pos[0] > mire[0] & pos[1] > mire[1]
& pos[2] < mire[2] & pos[3] < mire[3]):
cv2.imwrite("test.png", pic)
photo = False

cv2.imshow('my webcam', img)
key = cv2.waitKey(100)
# Exam-monitoring loop: watch the webcam, recognize the enrolled student,
# and count consecutive frames where the student is missing or a stranger
# is visible.
video_capture = cv2.VideoCapture(0)

# Load the reference picture of the student and learn how to recognize it.
# TODO: fetch the reference image from the server instead of the local file.
test_image = facerecog.load_image_file("faces/etudiant.png")

# Assumes exactly one face is present in the reference image ([0] would
# raise IndexError otherwise) — TODO confirm upstream capture guarantees it.
test_face_encoding = facerecog.face_encodings(test_image)[0]

# Known face encodings and their matching display names (parallel lists).
known_face_encodings = [
    test_face_encoding
]

known_face_names = [
    "etudiant"
]

# Per-frame recognition state.
face_locations = []
face_encodings = []
face_names = []

# Consecutive-frame counters: how long the student has been absent and how
# long an unknown face has been on screen.
warning_disapear = 0
warning_unknown = 0
while True:
    # Grab a single frame of video (return status deliberately ignored).
    _, frame = video_capture.read()
    frame = cv2.flip(frame, 1)
    # Resize frame of video to 1/4 size for faster face recognition
    # processing; detected boxes are scaled back up by 4 below.
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color
    # (which facerecog uses).  NOTE(review): this is a negative-stride view;
    # some dlib builds require a contiguous copy — confirm.
    rgb_small_frame = small_frame[:, :, ::-1]

    # Find all the faces and face encodings in the current frame of video.
    face_locations = facerecog.face_locations(rgb_small_frame)
    face_encodings = facerecog.face_encodings(
        rgb_small_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s).
        matches = facerecog.compare_faces(
            known_face_encodings, face_encoding)
        name = "Unknown"
        # If a match was found in known_face_encodings,
        # just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]

        face_names.append(name)

    print(face_names)
    # Student absent this frame -> grow the "disappeared" counter,
    # otherwise reset it.
    if "etudiant" not in face_names:
        warning_disapear += 1
    else:
        print("reset disapear")
        warning_disapear = 0

    if "Unknown" in face_names:
        warning_unknown += 1

    # Only reset the "unknown" counter when the student is alone on screen.
    if face_names == ["etudiant"]:
        print("reset unknown")
        warning_unknown = 0

    # ~50 consecutive suspicious frames trigger a console warning.
    if warning_disapear > 50:
        print("Oukilé ?")
    if warning_unknown > 50:
        print("konépa")
    # Display the results.
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was
        # scaled to 1/4 size.
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face.
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face.
        cv2.rectangle(frame, (left, bottom - 35),
                      (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6),
                    font, 1.0, (255, 255, 255), 1)

    # Display the resulting image.
    cv2.imshow('Video', frame)
    # BUG FIX: the original called cv2.waitKey three separate times per
    # frame (waitKey(100) then two waitKey(1) calls in the condition), so a
    # key press consumed by one call was invisible to the others and quitting
    # was unreliable.  Poll exactly once and test the single result.
    key = cv2.waitKey(100) & 0xFF
    # Hit 'q' (or Esc) on the keyboard to quit!
    if key == ord('q') or key == 27:
        break

# Release handle to the webcam.
video_capture.release()
cv2.destroyAllWindows()
53 changes: 0 additions & 53 deletions detectionvisage/plop2.py

This file was deleted.

52 changes: 0 additions & 52 deletions detectionvisage/prendphoto.py

This file was deleted.

Loading

0 comments on commit cd46083

Please sign in to comment.