Skip to content
Snippets Groups Projects
Commit ec3b8e2a authored by Simon Flaisch's avatar Simon Flaisch
Browse files

I have created a project and implemented the following basic functions:

- Text to speech
- Video capture
- Facial recognition and analysis
- Age, gender and emotion recognition

I also added a requirements.txt that can be used to acquire all used packages via this command: pip install -r requirements.txt
parent 9f32e804
No related branches found
No related tags found
No related merge requests found
import cv2
from deepface import DeepFace
import pyttsx3
import numpy as np
# --- Global variables / module-level setup (runs on import) ---
# Initializing the text-to-speech engine.
# NOTE(review): 'sapi5' is the Windows speech API driver, so this line
# makes the script Windows-only — confirm the intended target platform.
engine = pyttsx3.init('sapi5')
# Opening the default camera (device index 0) and starting video capture.
video_capture = cv2.VideoCapture(0)
# Haar cascade used for frontal-face detection; the XML file is loaded
# from the current working directory — if it is missing, the classifier
# is silently empty and detectMultiScale finds nothing.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Welcome message
def welcome_message():
    """Greet the user aloud through the global text-to-speech engine.

    Blocks until the greeting has been fully spoken (``runAndWait``).
    """
    greeting = 'Welcome to the face recognition and prediction tool of the herman hollerith center'
    engine.say(greeting)
    engine.runAndWait()
# Face recognition and prediction function (detection, annotation and
# speech could still be split further — Single Responsibility principle).
def face_recognition():
    """Run the live face-recognition loop until the user presses 'q'.

    Reads frames from the global ``video_capture``, detects faces with the
    global Haar ``face_cascade``, analyzes each frame with DeepFace
    (emotion, age, gender), draws the predictions onto the frame and reads
    them out loud via the global text-to-speech ``engine``.

    Side effects: opens an OpenCV preview window named "video_capture";
    releases the camera and closes the window on exit, even on error.
    """
    try:
        while video_capture.isOpened():
            ret, frame = video_capture.read()
            # Bug fix: the original never checked `ret`; a failed grab
            # would pass frame=None into cvtColor and crash.
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
            # Bug fix: DeepFace.analyze raises ValueError when it cannot
            # find a face; in the original it ran unguarded and crashed
            # the whole program on every empty frame.
            try:
                analyze = DeepFace.analyze(frame, actions=["emotion", "age", "gender"])
                result = analyze[0]
            except ValueError:
                print("No face detected")
                result = None
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
                if result is None:
                    continue
                # Adding the results of the prediction into the captured video
                cv2.putText(frame,
                            f'Approx. Age: {result["age"]}',
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
                cv2.putText(frame,
                            f'Approx. Gender:{result["dominant_gender"]}',
                            (x, y - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
                cv2.putText(frame,
                            f'Current emotion:{result["dominant_emotion"]}',
                            (x, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 0), 2)
                # Text-to-speech the results (runAndWait blocks the loop
                # until the sentence has been spoken).
                engine.say(f'We have predicted that your age is {result["age"]}, your gender is {result["dominant_gender"]} and your current emotional state is {result["dominant_emotion"]}')
                engine.runAndWait()
            # Showing the video
            cv2.imshow("video_capture", frame)
            # Back-out key: wait 1 ms per frame and quit on 'q'.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Bug fix: always release the camera and close the preview window,
        # even when the loop exits via an exception (the original leaked
        # both on error and never called destroyAllWindows at all).
        video_capture.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Entry point: greet the user first, then enter the live loop.
    welcome_message()
    face_recognition()
\ No newline at end of file
This diff is collapsed.
File suppressed by a .gitattributes entry or the file's encoding is unsupported.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment