From f3ff164b7bfe067403a20a7f13733a2ced2aee91 Mon Sep 17 00:00:00 2001
From: flaisch <simon.flaisch@student.reutlingen-university.de>
Date: Tue, 23 Apr 2024 08:59:02 +0200
Subject: [PATCH] Switch to FastAPI, make some overall improvements, and
 prepare the code for the latency tests

---
 Consumer.py              |  2 +-
 ReceiveVideostreaming.py | 20 +++++++++++++
 Test.html                |  2 +-
 main.py                  | 62 +++++++++++++++++++---------------------
 4 files changed, 52 insertions(+), 34 deletions(-)
 create mode 100644 ReceiveVideostreaming.py

diff --git a/Consumer.py b/Consumer.py
index 50b523a..c06b81f 100644
--- a/Consumer.py
+++ b/Consumer.py
@@ -4,7 +4,7 @@ import paho.mqtt.client as mqtt
 # The callback for when the client receives a CONNACK response from the server.
 def on_connect(client, userdata, flags, reason_code, properties):
     print(f"Connected with result code {reason_code}")
-    client.subscribe("Topic1")
+    client.subscribe("Finger")
 
 # The callback for when a PUBLISH message is received from the server.
 def on_message(client, userdata, msg):
diff --git a/ReceiveVideostreaming.py b/ReceiveVideostreaming.py
new file mode 100644
index 0000000..7ebb1ee
--- /dev/null
+++ b/ReceiveVideostreaming.py
@@ -0,0 +1,20 @@
+import cv2
+from imutils.video import VideoStream
+
+cv2.namedWindow("preview")
+vc = VideoStream("http://localhost:88/video_feed").start()
+
+while True:
+    frame = vc.read()
+
+
+    cv2.imshow("preview", frame)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+
+
+
+
+cv2.destroyWindow("preview")
+vc.release()
\ No newline at end of file
diff --git a/Test.html b/Test.html
index d0449a5..225b48d 100644
--- a/Test.html
+++ b/Test.html
@@ -4,6 +4,6 @@
   </head>
   <body>
     <h1>HHZ Livestream</h1>
-    <img src="http://localhost:88/video_feed">
+    <img src="http://localhost:89/video_feed">
   </body>
 </html>
\ No newline at end of file
diff --git a/main.py b/main.py
index 3310202..f1f091b 100644
--- a/main.py
+++ b/main.py
@@ -1,5 +1,5 @@
-import os
 import cv2
+import uvicorn
 import warnings
 import numpy as np
 import pyttsx3
@@ -11,11 +11,8 @@ import json
 import requests
 from PIL import Image
 from imutils.video import VideoStream
-from flask import Response
-from flask import Flask
-from flask import render_template
-import argparse
-import datetime
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
 import imutils
 
 
@@ -29,19 +26,38 @@ engine = pyttsx3.init('sapi5')
 
 
 
-app = Flask(__name__)
-
+app = FastAPI()
 # Definiere Kamera und starte Videoaufnahme
-video_capture =VideoStream(src=0).start()
+try:
+    video_capture =VideoStream("http://localhost:80/video_feed").start()
+except Exception as e:
+    print(f"We couldn't reach the camera")
+    exit()
 # Definiere, welche Cascade OpenCV verwenden soll
 face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
 
 time.sleep(2.0)
 
 class Main:
+
+    def __init__(self):
+        self.outputFrame = None
+        self.lock = threading.Lock()
+        self.wait_in_thread = None
+        self.streamThread = None
+        self.analysis_results = None
+        self.analysis_thread = None
+        self.speaking_thread = None  # Verfolgt den aktuellen Sprech-Thread
+        self.main_time = 0
+        self.last_chat_time = 0  # Zeitpunkt des letzten Sprechens API kosten beachten
+        self.currentclient = None
+        self.unacked_publish = set()
+        self.flag = False
+        self.selection = 3
+        self.it = 0
+
     def stream(self):
-        app.run(host="localhost", port=88, debug=True,
-                threaded=True, use_reloader=False)
+        uvicorn.run(host="localhost", port=89, app=app)
     def streamingThread(self):
         self.streamThread = threading.Thread(target=self.stream)
         self.streamThread.daemon = True
@@ -92,21 +108,6 @@ class Main:
         while len(self.unacked_publish):
             time.sleep(0.1)
         msg.wait_for_publish()
-    def __init__(self):
-        self.outputFrame = None
-        self.lock = threading.Lock()
-        self.wait_in_thread = None
-        self.streamThread = None
-        self.analysis_results = None
-        self.analysis_thread = None
-        self.speaking_thread = None  # Verfolgt den aktuellen Sprech-Thread
-        self.main_time = 0
-        self.last_chat_time = 0  # Zeitpunkt des letzten Sprechens API kosten beachten
-        self.currentclient = None
-        self.unacked_publish = set()
-        self.flag = False
-        self.selection = 3
-        self.it = 0
 
 
     def speak(self, text):
@@ -167,8 +168,6 @@ class Main:
     def face_recognition(self, mqttc):
 
         while True:
-            #cv2.namedWindow("video_capture", cv2.WINDOW_NORMAL)
-            #cv2.resizeWindow("video_capture", 800, 600)
 
             frame = video_capture.read()
             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
@@ -231,7 +230,6 @@ class Main:
                         frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
 
                     if self.speaking_thread and self.speaking_thread.is_alive():
-                        #self.flag = False
                         self.it = self.it + 1
                         frame_pillow = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))# Auslagern
 
@@ -273,12 +271,12 @@ if __name__ == '__main__':
     main = Main()
     main.welcome_message()
     main.streamingThread()
-    @app.route("/video_feed")
+    @app.get("/video_feed")
     def video_feed():
         # return the response generated along with the specific media
         # type (mime type)
-        return Response(main.generate(),
-                        mimetype="multipart/x-mixed-replace; boundary=frame")
+        return StreamingResponse(main.generate(),
+                                 media_type="multipart/x-mixed-replace; boundary=frame")
     main.currentclient =main.connectwithmqtt(adress="localhost", targetport=1883)
     main.face_recognition(mqttc=main.currentclient)
 
-- 
GitLab