Skip to content
Snippets Groups Projects
Commit e933887f authored by Simon Flaisch's avatar Simon Flaisch
Browse files

Implemented a threaded, independent livestream, which can be accessed with the test.html file

parent 0d4ef4dc
No related branches found
No related tags found
No related merge requests found
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>HHZ Livestream</title>
</head>
<body>
<h1>HHZ Livestream</h1>
<!-- MJPEG stream produced by the Flask /video_feed route
     (multipart/x-mixed-replace; boundary=frame) -->
<img src="http://localhost:88/video_feed" alt="HHZ live camera stream">
</body>
</html>
\ No newline at end of file
import os
import cv2
import warnings
import numpy as np
import openai
import pyttsx3
warnings.filterwarnings("ignore")
import time
from deepface import DeepFace
import threading
......@@ -14,15 +10,62 @@ import paho.mqtt.client as mqtt
import json
import requests
from PIL import Image
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
import argparse
import datetime
import imutils
warnings.filterwarnings("ignore")
# Initialize text-to-speech (SAPI5 is the Windows speech backend)
engine = pyttsx3.init('sapi5')
# Flask app that serves the MJPEG livestream (route registered in __main__)
app = Flask(__name__)
# Define the camera and start video capture.
# NOTE(review): two capture objects are created back to back (old/new lines of
# a diff view) — the plain cv2.VideoCapture handle is immediately shadowed by
# the threaded imutils VideoStream and is never released. Confirm only the
# VideoStream line belongs in the real file.
video_capture = cv2.VideoCapture(0)
video_capture =VideoStream(src=0).start()
# Haar cascade OpenCV uses for frontal-face detection (file expected next to the script)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# give the camera sensor time to warm up before the first frame is read
time.sleep(2.0)
class Main:
def stream(self):
    """Run the Flask development server that exposes the MJPEG feed.

    Blocks until the server shuts down, so it is normally launched on a
    background thread via streamingThread().
    """
    app.run(
        host="localhost",
        port=88,
        debug=True,
        use_reloader=False,  # the reloader cannot re-exec from a non-main thread
        threaded=True,
    )
def streamingThread(self):
    """Start the Flask server on a daemon thread and keep its handle.

    The daemon flag lets the process exit even while the server thread
    is still running.
    """
    worker = threading.Thread(target=self.stream, daemon=True)
    self.streamThread = worker
    worker.start()
def generate(self):
    """Yield the latest camera frame as MJPEG multipart chunks, forever.

    Consumed by the /video_feed route. Each chunk is a JPEG encoding of
    self.outputFrame wrapped in the multipart boundary headers expected by
    a mimetype of multipart/x-mixed-replace; boundary=frame.
    """
    while True:
        # snapshot and encode the shared frame while holding the lock
        with self.lock:
            if self.outputFrame is None:
                # producer has not published a frame yet — poll again
                continue
            ok, jpeg = cv2.imencode(".jpg", self.outputFrame)
            if not ok:
                # encoding failed; skip this frame and retry
                continue
        # stream the encoded frame outside the lock
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               bytearray(jpeg) +
               b'\r\n')
def on_connect(self,client, userdata, flags, reason_code, properties):
    """MQTT connect callback (paho-mqtt v2 signature): log and subscribe.

    Subscribing here (rather than once at startup) means the subscription
    is re-established automatically after a reconnect.
    """
    print(f"Connected with result code {reason_code}\n")
    client.subscribe("Topic1")
......@@ -50,7 +93,10 @@ class Main:
time.sleep(0.1)
msg.wait_for_publish()
def __init__(self):
    # latest annotated frame shared with generate(), guarded by self.lock
    self.outputFrame = None
    self.lock = threading.Lock()
    # NOTE(review): this attribute shares its name with the wait_in_thread()
    # method defined below; assigning it here shadows the method on every
    # instance, so self.wait_in_thread() raises TypeError after construction.
    # Rename either the attribute or the method.
    self.wait_in_thread = None
    self.streamThread = None
    self.analysis_results = None
    self.analysis_thread = None
    self.speaking_thread = None  # tracks the currently running speech thread
    ......@@ -62,14 +108,6 @@ class Main:
    self.selection = 3
    self.it = 0
def wait(self):
    # Raise the flag read by face_recognition(); despite the name this does
    # not block — it only records that the wait state was (re)entered.
    self.flag = True
def wait_in_thread(self):
    # Launch wait() on a daemon thread unless one is already running.
    # NOTE(review): __init__ sets self.wait_in_thread = None, which shadows
    # this method on instances — calling self.wait_in_thread() after
    # construction raises "TypeError: 'NoneType' object is not callable".
    # The attribute (or this method) must be renamed for this to be reachable.
    if self.wait_in_thread is None or not self.wait_in_thread.is_alive():
        self.wait_in_thread = threading.Thread(target=self.wait)
        self.wait_in_thread.daemon = True
        self.wait_in_thread.start()
def speak(self, text):
    """Queue *text* on the module-level pyttsx3 engine (body continues past
    this diff hunk)."""
    engine.say(text)
......@@ -106,7 +144,7 @@ class Main:
def perform_request(self, emotion):
    """Send the detected emotion to the Eden AI text endpoint via OpenAI
    (request body continues past this diff hunk).

    SECURITY(review): the bearer token is hard-coded in source control —
    move it to an environment variable / config file and revoke the
    committed token.
    """
    headers = {
        # NOTE(review): both the removed and the added diff line survive in
        # this view; the real file contains exactly one "Authorization" entry.
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiZDNmZTQyZWUtZmYwZC00OTRiLWIxZTAtNWIzNmE5YzA5ODY0IiwidHlwZSI6ImFwaV90b2tlbiJ9.Fhgy2npTWdM4FYTRYQJIAaG94SfpFhvKuGI5VlNBCVQ"
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiMmY4YTA2ODgtODIzNC00N2RhLTgwNjAtMjJhYjdlOWExNzE0IiwidHlwZSI6InNhbmRib3hfYXBpX3Rva2VuIn0.RbkSarkirWu8MyOved0gmafiGa0XauxoSaf1flSg3s4"
    }
    payload = {
        "providers": "openai",
......@@ -127,12 +165,12 @@ class Main:
print(f"Fehler beim Senden/Empfangen der Daten zu/von ChatGPT: {e}")
def face_recognition(self, mqttc):
    """Main capture/annotate loop: grab frames, detect faces, publish the
    annotated frame to the MJPEG generator; exits when 'q' is pressed.

    NOTE(review): this is a corrupted diff view — several removed/added
    line pairs appear side by side below and two hunks are elided.
    """
    while True:
        self.wait()
        # diff pair: the on-screen cv2 window calls were removed (commented
        # out) in favour of serving frames over HTTP
        cv2.namedWindow("video_capture", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("video_capture", 800, 600)
        #cv2.namedWindow("video_capture", cv2.WINDOW_NORMAL)
        #cv2.resizeWindow("video_capture", 800, 600)
        # diff pair: old cv2.VideoCapture API (returns ret, frame) vs new
        # imutils VideoStream API (returns the frame only)
        ret, frame = video_capture.read()
        frame = video_capture.read()
        # Haar cascades operate on grayscale input
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=10)
        current_time = time.time()
        ......@@ -192,8 +230,8 @@ class Main:
        frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
        # diff pair: the self.flag condition/reset was dropped in the new version
        if self.speaking_thread and self.speaking_thread.is_alive() and self.flag == True:
            self.flag = False
        if self.speaking_thread and self.speaking_thread.is_alive():
            #self.flag = False
            self.it = self.it + 1
            frame_pillow = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))# TODO: extract into a helper
            ......@@ -217,21 +255,30 @@ class Main:
            frame = cv2.cvtColor(np.array(frame_pillow), cv2.COLOR_RGB2BGR)
        # hand the annotated frame to generate() under the shared lock
        with self.lock:
            self.outputFrame = frame.copy()
        cv2.imshow("video_capture", frame)
        # 'q' ends the loop and tears down the MQTT client
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("Goodbye!")
            mqttc.disconnect()
            mqttc.loop_stop()
            break
    # diff pair: VideoCapture.release() (old) vs VideoStream.stop() (new)
    video_capture.release()
    video_capture.stop()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Wire everything together: greet the user, start the stream server on
    # its daemon thread, register the MJPEG route, connect to the broker,
    # then enter the blocking recognition loop.
    main = Main()
    main.welcome_message()
    main.streamingThread()

    @app.route("/video_feed")
    def video_feed():
        """Serve the camera feed as a multipart MJPEG response."""
        frames = main.generate()
        return Response(frames,
                        mimetype="multipart/x-mixed-replace; boundary=frame")

    main.currentclient = main.connectwithmqtt(adress="localhost", targetport=1883)
    main.face_recognition(mqttc=main.currentclient)
......
......
File deleted
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please sign in to comment