Skip to content
Snippets Groups Projects
Commit fc950a7f authored by Amel Abdic's avatar Amel Abdic :8ball:
Browse files

Implemented TTS

parent c945a102
No related branches found
No related tags found
No related merge requests found
#model.h5 is best model
import cv2
import warnings
import pyttsx3
warnings.filterwarnings("ignore")
import time
from deepface import DeepFace
import threading
# Initialize the text-to-speech engine ('sapi5' is the Windows speech API backend).
engine = pyttsx3.init('sapi5')
# Open the default camera (device index 0) and start video capture.
video_capture = cv2.VideoCapture(0)
# Haar cascade OpenCV uses for frontal-face detection.
# NOTE(review): the XML file is loaded from the working directory — confirm it ships with the project.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
class Main:
    """Webcam face recognition with spoken age/gender/emotion feedback.

    Captures frames from the module-level ``video_capture``, detects faces
    with ``face_cascade``, periodically analyzes the frame with DeepFace,
    overlays the results on the preview window, and narrates them via the
    module-level pyttsx3 ``engine``.

    NOTE(review): reconstructed from a mangled diff view — the paste
    interleaved two revisions (duplicate method headers) and collapsed part
    of ``face_recognition`` behind a "@@" hunk marker; duplicates were
    removed and the hidden capture/detection lines restored from the
    standard OpenCV pattern. Verify against the original file.
    """

    def __init__(self):
        # Currently running speech thread, or None until the first utterance.
        self.speaking_thread = None
        # Timestamp of the last spoken message; used to throttle speech.
        self.last_speak_time = 0

    def speak(self, text):
        """Speak ``text`` synchronously; blocks until playback finishes."""
        engine.say(text)
        engine.runAndWait()

    def speak_in_thread(self, text):
        """Speak ``text`` on a background thread, at most once every 8 seconds.

        The request is dropped when a previous speech thread is still alive
        or the last utterance happened less than 8 seconds ago, so overlapping
        or rapid-fire announcements are avoided.
        """
        current_time = time.time()
        if (self.speaking_thread is None or not self.speaking_thread.is_alive()) \
                and (current_time - self.last_speak_time > 8):
            self.last_speak_time = current_time  # remember when we last spoke
            self.speaking_thread = threading.Thread(target=self.speak, args=(text,))
            self.speaking_thread.start()

    def welcome_message(self):
        """Announce the tool's welcome message without blocking the caller."""
        self.speak_in_thread('Welcome to the face recognition and prediction tool of the herman hollerith center')

    def face_recognition(self):
        """Main loop: detect faces, analyze them periodically, narrate results.

        Runs until the user presses 'q' in the preview window, then releases
        the camera and destroys all OpenCV windows.
        """
        last_analysis_time = time.time()
        analysis_interval = 8  # seconds between DeepFace analyses
        analyze = None         # most recent analysis result (dict) or None
        while True:
            # NOTE(review): the capture/detection lines below were hidden
            # behind the diff's collapsed hunk; restored from the standard
            # OpenCV pattern — confirm against the original file.
            ret, frame = video_capture.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.1, 4)
            current_time = time.time()
            if current_time - last_analysis_time > analysis_interval:
                try:
                    # DeepFace.analyze returns a list; use the first face's result.
                    analysis_results = DeepFace.analyze(frame, actions=["emotion", "age", "gender"], enforce_detection=False)
                    analyze = analysis_results[0] if analysis_results else None
                    last_analysis_time = current_time
                except Exception as e:
                    print("Error in DeepFace Analysis:", e)
                    analyze = None  # reset on failure so stale data is not narrated
            if len(faces) > 0 and analyze:
                text_to_speak = f'Your age is {analyze.get("age", "N/A")}, your gender is {analyze.get("dominant_gender", "N/A")} and your current emotional state is {analyze.get("dominant_emotion", "N/A")}.'
                self.speak_in_thread(text_to_speak)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
                if analyze:
                    # Overlay the latest analysis next to each detected face.
                    cv2.putText(frame, f'Approx. Age: {analyze.get("age", "N/A")}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
                    cv2.putText(frame, f'Approx. Gender: {analyze.get("dominant_gender", "N/A")}', (x, y - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
                    cv2.putText(frame, f'Current emotion: {analyze.get("dominant_emotion", "N/A")}', (x, y - 70), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 0), 2)
            cv2.imshow("video_capture", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        video_capture.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Greet the user, then enter the blocking recognition loop.
    # (Removed a leftover commented-out duplicate of welcome_message().)
    main = Main()
    main.welcome_message()
    main.face_recognition()
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please sign in to comment