Skip to content
This repository has been archived by the owner on Sep 15, 2022. It is now read-only.

Commit

Permalink
Add sound and test import
Browse files Browse the repository at this point in the history
  • Loading branch information
ierezell committed Sep 25, 2018
1 parent cd46083 commit 21c0c0d
Show file tree
Hide file tree
Showing 20 changed files with 257 additions and 115 deletions.
14 changes: 14 additions & 0 deletions ExamAi.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import os
import sys

# BUG FIX: os.path.abspath(__file__) is the path of this *file*, not its
# directory — appending it (and file-path + '/subdir/' concatenations) put
# non-existent entries on sys.path. Use the containing directory instead.
_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_BASE_DIR)
sys.path.append(os.path.join(_BASE_DIR, 'detectFaces'))
sys.path.append(os.path.join(_BASE_DIR, 'detectSound'))
sys.path.append(os.path.join(_BASE_DIR, 'Api'))

from detectFaces import detectWebcam, takeref
from Api import facerecog, models
import cv2
import pyaudio

if __name__ == "__main__":
    # Entry point: start monitoring the student through the webcam.
    detectWebcam.detectStudent()
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
# ExamAi
# ExamAi

The goal is to monitor students taking exams and to add some artificial intelligence to help teachers keep an eye on them.
5 changes: 5 additions & 0 deletions detectFaces/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
import cv2

# Re-export the package's public modules so callers can write
# `from detectFaces import detectWebcam, takeref`.
from detectFaces import detectWebcam
from detectFaces import takeref
# Explicit public API of the package.
__all__ = ['detectWebcam', 'takeref']
110 changes: 110 additions & 0 deletions detectFaces/detectWebcam.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
def detectStudent():
    """Monitor the webcam during an exam and warn when the enrolled student
    disappears from view or an unknown face appears.

    Blocks until the user presses 'q' or Esc in the video window, then
    releases the webcam and closes the OpenCV windows.
    """
    # Take the reference picture to compare against all along the exam.
    takeref.takepicture()

    video_capture = cv2.VideoCapture(0)

    # Load a sample picture and learn how to recognize it.
    # TODO: fetch the reference image from the server instead of local disk.
    test_image = facerecog.load_image_file("faces/etudiant.png")
    test_face_encoding = facerecog.face_encodings(test_image)[0]

    # Known face encodings and their matching display names.
    known_face_encodings = [test_face_encoding]
    known_face_names = ["etudiant"]

    # Counters of consecutive frames where the student is missing / a
    # stranger is present; a warning fires once either exceeds 50.
    warning_disapear = 0
    warning_unknown = 0

    try:
        while True:
            # Grab a single frame of video (mirrored for a natural display).
            _, frame = video_capture.read()
            frame = cv2.flip(frame, 1)

            # Resize to 1/4 size for faster face-recognition processing.
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

            # Convert BGR (OpenCV's order) to RGB (facerecog's order).
            rgb_small_frame = small_frame[:, :, ::-1]

            # Find all the faces and face encodings in the current frame.
            face_locations = facerecog.face_locations(rgb_small_frame)
            face_encodings = facerecog.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s).
                matches = facerecog.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings,
                # just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                face_names.append(name)

            print(face_names)
            if "etudiant" not in face_names:
                warning_disapear += 1
            else:
                warning_disapear = 0

            if "Unknown" in face_names:
                warning_unknown += 1
            if face_names == ["etudiant"]:
                warning_unknown = 0

            if warning_disapear > 50:
                print("Oukilé ?")
            if warning_unknown > 50:
                print("konépa")

            # Display the results.
            for (top, right, bottom, left), name in zip(face_locations,
                                                        face_names):
                # Scale locations back up: detection ran on a 1/4-size frame.
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4

                # Draw a box around the face.
                cv2.rectangle(frame, (left, top), (right, bottom),
                              (0, 0, 255), 2)

                # Draw a label with the name below the face.
                cv2.rectangle(frame, (left, bottom - 35),
                              (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6),
                            font, 1.0, (255, 255, 255), 1)

            # Display the resulting image.
            cv2.imshow('Video', frame)

            # BUG FIX: the original called cv2.waitKey() three times per
            # frame, so a key press could be consumed by the first call and
            # missed by the comparisons. Poll once and test the single result.
            key = cv2.waitKey(200) & 0xFF
            if key == ord('q') or key == 27:  # 'q' or Esc quits
                break
    finally:
        # Release the webcam handle even if an exception interrupts the loop.
        video_capture.release()
        cv2.destroyAllWindows()
Binary file added detectFaces/faces/etudiant.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
File renamed without changes
File renamed without changes
File renamed without changes
5 changes: 2 additions & 3 deletions detectionvisage/plop.py → detectFaces/plop.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import cv2
import takeref
from Api import facerecog
Expand Down Expand Up @@ -67,25 +68,23 @@

face_names.append(name)

print(face_names)
if "etudiant" not in face_names:
warning_disapear += 1
else:
print("reset disapear")
warning_disapear = 0

if "Unknown" in face_names:
warning_unknown += 1

if face_names == ["etudiant"]:
print("reset unknown")
warning_unknown = 0

if warning_disapear > 50:
print("Oukilé ?")
if warning_unknown > 50:
print("konépa")
# Display the results

for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was
# scaled to 1/4 size
Expand Down
3 changes: 1 addition & 2 deletions detectionvisage/takeref.py → detectFaces/takeref.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
import cv2


def takepicture():
"""
Returns :
Expand Down Expand Up @@ -30,6 +28,7 @@ def takepicture():
# Load the filters for the eye and face recognizer
face_cascade = cv2.CascadeClassifier(
'../Api/models/haarcascade_frontalface_default.xml')
print(face_cascade.empty())
eye_cascade = cv2.CascadeClassifier(
'../Api/models/haarcascade_eye.xml')

Expand Down
Binary file added detectSound/RockGuitar-16-44p1-stereo-72secs.wav
Binary file not shown.
File renamed without changes.
38 changes: 38 additions & 0 deletions detectSound/detectSound.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import pyaudio
import wave

# Recording parameters.
CHUNK = 1024                      # frames read per buffer
FORMAT = pyaudio.paInt16          # 16-bit signed samples
CHANNELS = 2                      # stereo
RATE = 44100                      # sample rate in Hz
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()

# Sample width in bytes, captured up front so it is available after cleanup.
sample_width = p.get_sample_size(FORMAT)

stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)

print("* recording")

frames = []
try:
    # Read exactly RECORD_SECONDS worth of audio, CHUNK frames at a time.
    for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
        frames.append(stream.read(CHUNK))
finally:
    # BUG FIX: release the audio device even if reading raises midway;
    # the original leaked the stream and the PyAudio handle on error.
    stream.stop_stream()
    stream.close()
    p.terminate()

print("* done recording")

# Persist the captured frames as a standard WAV file.
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
try:
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(sample_width)
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
finally:
    wf.close()
34 changes: 34 additions & 0 deletions detectSound/plop.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import speech_recognition as sr
from gtts import gTTS
# Quiet the endless 'InsecureRequestWarning'.
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

from pygame import mixer
mixer.init()

# One recognizer for the whole session — no need to rebuild it every loop.
r = sr.Recognizer()

while True:
    # Obtain audio from the microphone.
    with sr.Microphone() as source:
        # Listen for 1 second to calibrate the ambient-noise energy level.
        r.adjust_for_ambient_noise(source, duration=1)
        print("Say something!")
        audio = r.listen(source, phrase_time_limit=5)

    # Recognize speech using the Google Web Speech API.
    try:
        response = r.recognize_google(audio)
        print("I think you said '" + response + "'")
        # Speak the recognized text back through the speakers.
        tts = gTTS(text="I think you said " + str(response), lang='en')
        tts.save("response.mp3")
        mixer.music.load('response.mp3')
        mixer.music.play()
    except sr.UnknownValueError:
        # BUG FIX: the messages said "Sphinx" but recognize_google is used.
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Google Speech Recognition error; {0}".format(e))
31 changes: 31 additions & 0 deletions detectSound/test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import pyaudio
import wave
import sys

print("plop")
pa = pyaudio.PyAudio()
print(pa.get_default_input_device_info())

CHUNK = 1024  # frames read per playback iteration

if len(sys.argv) < 2:
    print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0])
    sys.exit(-1)

wf = wave.open(sys.argv[1], 'rb')

p = pyaudio.PyAudio()

# Open an output stream matching the WAV file's format.
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)

data = wf.readframes(CHUNK)

# BUG FIX: wave.readframes() returns bytes in Python 3, so the original
# `while data != ''` comparison was always true (b'' != '') and the loop
# never terminated at end-of-file. Test truthiness instead.
while data:
    stream.write(data)
    data = wf.readframes(CHUNK)

stream.stop_stream()
stream.close()

p.terminate()
Empty file removed detectionvisage/__init__.py
Empty file.
94 changes: 0 additions & 94 deletions detectionvisage/detect.py

This file was deleted.

Binary file removed detectionvisage/faces/etudiant.png
Binary file not shown.
6 changes: 6 additions & 0 deletions plan
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
horloge
requests
takeref javascipt
rapport annoté
get Info debut exam Idul
Mechanisme Clée ?
Loading

0 comments on commit 21c0c0d

Please sign in to comment.