Commit 42135c4a authored by Amel Abdic

Implemented Handy VideoFeed

Showing with 415 additions and 0 deletions
/.venv/
/.idea/
<html>
<head>
<style>
</style>
</head>
<body>
<div id="smileyContainer">No emotion recognized so far</div>
<script>
const smileyContainer = document.getElementById('smileyContainer');
// Connect to the Node.js bridge (later in this commit) that forwards MQTT messages over WebSockets.
const webSocket = new WebSocket('ws://localhost:443/');
webSocket.onmessage = (event) => {
    const obj = JSON.parse(event.data);
    if (obj.emotion === "happy") {
        smileyContainer.innerHTML = ":-)";
    } else if (obj.emotion === "neutral") {
        smileyContainer.innerHTML = ":-|";
    } else if (obj.emotion === "angry") {
        smileyContainer.innerHTML = ":-(";
    }
};
</script>
</body>
</html>
import keyboard
import paho.mqtt.client as mqtt

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, reason_code, properties):
    print(f"Connected with result code {reason_code}")
    client.subscribe("Topic1")

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))

mqttc = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2, userdata=None)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.username_pw_set(username="standardUser", password="GreatHHZ4Ever!")
mqttc.connect("localhost", 1883)

# Run the network loop in a background thread; press space to quit.
mqttc.loop_start()
while True:
    if keyboard.read_key() == "space":
        mqttc.disconnect()
        mqttc.loop_stop()
        print("goodbye!")
        break
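This standalone subscriber is handy for checking that analysis results actually arrive on Topic1 without running the full pipeline; note that the keyboard package typically needs root privileges on Linux to read key events.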
FROM eclipse-mosquitto
COPY mosquitto.conf ./mosquitto/config/
COPY mosquitto.passwd ./mosquitto/config/mosquitto.passwd
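The eclipse-mosquitto base image reads its configuration from /mosquitto/config/mosquitto.conf, so the relative COPY destinations above resolve to that directory when built from the image's default working directory.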
version: "3.7"
services:
mosquitto:
build: ./
image: uwebreitenbuecher/mosquitto
ports:
- "1883:1883"
- "9001:9001"
\ No newline at end of file
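Running docker compose up --build then starts the broker with plain MQTT on port 1883 and MQTT over websockets on port 9001, matching the two listeners in the mosquitto.conf below.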
allow_anonymous false
listener 1883
listener 9001
protocol websockets
persistence false
persistence_file mosquitto.db
persistence_location /mosquitto/data/
log_dest stdout
password_file /mosquitto/config/mosquitto.passwd
standardUser:$7$101$qj8csF6pkG5K5UgI$5L3+LNq5NAzXf5Cb9GXLDuhU1agDFnSYFKc+hBUy0bV9wZeo4QND7YqTaK0m92pDnj2tHjA4BztW4CHHf5q/uQ==
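The $7$ prefix marks mosquitto's sha512-pbkdf2 hashing scheme; an entry like this can be recreated (the tool prompts for the password) with:

mosquitto_passwd -c mosquitto.passwd standardUser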
<html>
<head>
<title>HHZ Livestream</title>
</head>
<body>
<h1>HHZ Livestream</h1>
<img src="http://localhost:88/video_feed">
</body>
</html>
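Port 88 here matches the Flask server started in main.py (app.run(..., port=88)), which serves the MJPEG stream at /video_feed.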
Binary images added under emoji_folder/:

emoji_folder/angry.png (19.8 KiB)
emoji_folder/fear.png (20.4 KiB)
emoji_folder/happy.png (18.6 KiB)
emoji_folder/mas1.png (153 KiB)
emoji_folder/mas2.png (154 KiB)
emoji_folder/mas3.png (156 KiB)
emoji_folder/neutral.png (13.9 KiB)
emoji_folder/sad.png (20.6 KiB)
emoji_folder/surprise.png (22.7 KiB)
main.py 0 → 100644
import warnings
import pyttsx3
import time
from deepface import DeepFace
import threading
import paho.mqtt.client as mqtt
import json
from PIL import Image
from imutils.video import VideoStream
from flask import Response
from flask import Flask
import cv2
import requests
import numpy as np

warnings.filterwarnings("ignore")

# Initialise text-to-speech
engine = pyttsx3.init('sapi5')

app = Flask(__name__)

# Define the camera and start video capture
video_capture = VideoStream(src=0).start()

# Define which cascade OpenCV should use
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
time.sleep(2.0)
class Main:
    def fetch_frame(self):
        # Grab a single JPEG snapshot from the IP webcam and decode it.
        url = 'http://192.168.178.82:8080/shot.jpg'
        response = requests.get(url)
        image_array = np.array(bytearray(response.content), dtype=np.uint8)
        frame = cv2.imdecode(image_array, -1)
        return frame
    def stream(self):
        app.run(host="localhost", port=88, debug=True,
                threaded=True, use_reloader=False)

    def streamingThread(self):
        # Run the Flask video-feed server in a background daemon thread.
        self.streamThread = threading.Thread(target=self.stream)
        self.streamThread.daemon = True
        self.streamThread.start()
    def generate(self):
        # Loop over frames from the output stream
        while True:
            # Wait until the lock is acquired
            with self.lock:
                # Check if the output frame is available, otherwise skip
                # this iteration of the loop
                if self.outputFrame is None:
                    continue
                # Encode the frame in JPEG format
                (flag, encodedImage) = cv2.imencode(".jpg", self.outputFrame)
                # Ensure the frame was successfully encoded
                if not flag:
                    continue
            # Yield the output frame in byte format
            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
                   bytearray(encodedImage) + b'\r\n')
    def on_connect(self, client, userdata, flags, reason_code, properties):
        print(f"Connected with result code {reason_code}\n")
        client.subscribe("Topic1")

    def on_publish(self, client, userdata, mid, reason_code, properties):
        try:
            # userdata is the set of unacknowledged message ids.
            userdata.remove(mid)
        except KeyError:
            print("Something went wrong. Please check if you have removed the mid from the userdata")
    def connectwithmqtt(self, address: str, targetport: int):
        mqttc = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2, userdata=None)
        mqttc.on_connect = self.on_connect
        mqttc.on_publish = self.on_publish
        mqttc.user_data_set(self.unacked_publish)
        mqttc.username_pw_set(username="standardUser", password="GreatHHZ4Ever!")
        mqttc.connect(host=address, port=targetport)
        while not mqttc.is_connected():
            mqttc.loop()
            print("Waiting for connection...")
        return mqttc
    def sendMessage(self, mqttc, topic, message):
        mqttc.loop_start()
        msg = mqttc.publish(topic, message, qos=1)
        self.unacked_publish.add(msg.mid)
        # Block until the broker has acknowledged the publish.
        while len(self.unacked_publish):
            time.sleep(0.1)
        msg.wait_for_publish()
    def __init__(self):
        self.outputFrame = None
        self.lock = threading.Lock()
        self.wait_in_thread = None
        self.streamThread = None
        self.analysis_results = None
        self.analysis_thread = None
        self.speaking_thread = None  # Tracks the current speaking thread
        self.main_time = 0
        self.last_chat_time = 0  # Time of the last spoken reply (mind the API costs)
        self.currentclient = None
        self.unacked_publish = set()
        self.flag = False
        self.selection = 3
        self.it = 0
    def speak(self, text):
        engine.say(text)
        engine.runAndWait()

    def speak_in_thread(self, text):
        # Only start a new speaking thread if none is currently running.
        if self.speaking_thread is None or not self.speaking_thread.is_alive():
            self.speaking_thread = threading.Thread(target=self.speak, args=(text,))
            self.speaking_thread.daemon = True
            self.speaking_thread.start()

    def facial_analysis_thread(self, faceframe):
        if self.analysis_thread is None or not self.analysis_thread.is_alive():
            self.analysis_thread = threading.Thread(target=self.facial_analysis, args=(faceframe,))
            self.analysis_thread.daemon = True
            self.analysis_thread.start()
    def facial_analysis(self, faceframe):
        self.analysis_results = DeepFace.analyze(faceframe, actions=["emotion", "age", "gender"], enforce_detection=False)

    # Welcome message
    def welcome_message(self):
        self.speak_in_thread('Welcome to the face recognition and prediction tool of the herman hollerith center')
    def send_emotion_to_chatgpt_and_speak(self, emotion):
        # Rate-limit chat requests to one every ten seconds.
        current_time = time.time()
        if current_time - self.last_chat_time < 10:
            return
        self.last_chat_time = current_time
        request_thread = threading.Thread(target=self.perform_request, args=(emotion,))
        request_thread.daemon = True
        request_thread.start()
    def perform_request(self, emotion):
        headers = {
            "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiMmY4YTA2ODgtODIzNC00N2RhLTgwNjAtMjJhYjdlOWExNzE0IiwidHlwZSI6InNhbmRib3hfYXBpX3Rva2VuIn0.RbkSarkirWu8MyOved0gmafiGa0XauxoSaf1flSg3s4"
        }
        # The prompt is German on purpose: the assistant is asked to reply in German.
        payload = {
            "providers": "openai",
            "text": f"Die Person fühlt sich {emotion} an. Antworte auf Deutsch ohne Komma und in einem kurzen Satz. Motiviere die Person, erwähne die {emotion} Emotion. Du bist in einem Face detection Programm versuche immer Variation in neuen Sätzen zu bringen.",
            "chatbot_global_action": "Act as an assistant",
            "previous_history": [],
            "temperature": 0.0,
            "max_tokens": 150,
            "fallback_providers": ""
        }
        try:
            response = requests.post("https://api.edenai.run/v2/text/chat", json=payload, headers=headers)
            result = response.json()
            generated_text = result['openai']['generated_text']
            print(generated_text)
            self.speak_in_thread(generated_text)
        except Exception as e:
            print(f"Error while sending/receiving data to/from ChatGPT: {e}")
    def face_recognition(self, mqttc):
        # Load the overlay images once instead of on every frame.
        maskottchen_images = {
            "mas1": cv2.imread("emoji_folder/mas1.png", cv2.IMREAD_UNCHANGED),
            "mas2": cv2.imread("emoji_folder/mas2.png", cv2.IMREAD_UNCHANGED),
            "mas3": cv2.imread("emoji_folder/mas3.png", cv2.IMREAD_UNCHANGED),
        }
        emoji_images = {
            "happy": cv2.imread("emoji_folder/happy.png", cv2.IMREAD_UNCHANGED),
            "neutral": cv2.imread("emoji_folder/neutral.png", cv2.IMREAD_UNCHANGED),
            "sad": cv2.imread("emoji_folder/sad.png", cv2.IMREAD_UNCHANGED),
            "fear": cv2.imread("emoji_folder/fear.png", cv2.IMREAD_UNCHANGED),
            "surprise": cv2.imread("emoji_folder/surprise.png", cv2.IMREAD_UNCHANGED),
            "angry": cv2.imread("emoji_folder/angry.png", cv2.IMREAD_UNCHANGED)
        }
        analyze = None
        while True:
            frame = self.fetch_frame()
            #cv2.namedWindow("video_capture", cv2.WINDOW_NORMAL)
            #cv2.resizeWindow("video_capture", 800, 600)
            #frame = video_capture.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=10)
            current_time = time.time()
            try:
                # Re-run the (threaded) DeepFace analysis at most every two seconds.
                if current_time - self.main_time > 2:
                    self.main_time = current_time
                    self.facial_analysis_thread(frame)
                    analyze = self.analysis_results[0] if self.analysis_results else None
                    if analyze:
                        analysis_results_dict = {"age": analyze.get("age", "N/A"), "gender": analyze.get("dominant_gender", "N/A"), "emotion": analyze.get("dominant_emotion", "N/A")}
                        self.sendMessage(mqttc=mqttc, topic="Topic1", message=json.dumps(analysis_results_dict))
            except Exception as e:
                print("Error in DeepFace analysis:", e)
                analyze = None
            if len(faces) > 0 and analyze:
                #text_to_speak = f'Your age is {analyze.get("age", "N/A")}, your gender is {analyze.get("dominant_gender", "N/A")} and your current emotional state is {analyze.get("dominant_emotion", "N/A")}.'
                #self.speak_in_thread(text_to_speak)
                emotion = analyze.get("dominant_emotion", "N/A")
                if emotion in ["happy", "neutral", "sad", "fear", "surprise", "angry"]:
                    self.send_emotion_to_chatgpt_and_speak(emotion)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
                if analyze:
                    cv2.putText(frame, f'Approx. Age: {analyze.get("age", "N/A")}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
                    cv2.putText(frame, f'Approx. Gender: {analyze.get("dominant_gender", "N/A")}', (x, y - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
                    cv2.putText(frame, f'Current emotion: {analyze.get("dominant_emotion", "N/A")}', (x, y - 70), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 0), 2)
                    emotion = analyze.get("dominant_emotion", "N/A")
                    if emotion in emoji_images:
                        # Paste the matching emoji over the detected face; PIL handles the alpha channel.
                        frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                        emoji_pil = Image.fromarray(cv2.cvtColor(emoji_images[emotion], cv2.COLOR_BGRA2RGBA))
                        emoji_resized = emoji_pil.resize((w + 20, h + 20))
                        x_offset = x - 10
                        y_offset = y - 10
                        frame_pil.paste(emoji_resized, (x_offset, y_offset), mask=emoji_resized)
                        frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
            if self.speaking_thread and self.speaking_thread.is_alive():
                #self.flag = False
                # Cycle through the three mascot images while speech is playing.
                self.it = self.it + 1
                frame_pillow = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # TODO: extract into a helper
                mas_pil = Image.fromarray(cv2.cvtColor(maskottchen_images["mas1"], cv2.COLOR_BGRA2RGBA))
                mas_pil2 = Image.fromarray(cv2.cvtColor(maskottchen_images["mas2"], cv2.COLOR_BGRA2RGBA))
                mas_pil3 = Image.fromarray(cv2.cvtColor(maskottchen_images["mas3"], cv2.COLOR_BGRA2RGBA))
                mas_resized = mas_pil.resize((200, 200))
                mas_resized2 = mas_pil2.resize((200, 200))
                mas_resized3 = mas_pil3.resize((200, 200))
                x_offset = 400
                y_offset = 700
                if self.it % self.selection == 0:
                    frame_pillow.paste(mas_resized, (x_offset, y_offset), mask=mas_resized)
                if self.it % self.selection == 1:
                    frame_pillow.paste(mas_resized2, (x_offset, y_offset), mask=mas_resized2)
                if self.it % self.selection == 2:
                    frame_pillow.paste(mas_resized3, (x_offset, y_offset), mask=mas_resized3)
                frame = cv2.cvtColor(np.array(frame_pillow), cv2.COLOR_RGB2BGR)
            # Publish the frame to the MJPEG stream and show it locally.
            with self.lock:
                self.outputFrame = frame.copy()
            cv2.imshow("video_capture", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("Goodbye!")
                mqttc.disconnect()
                mqttc.loop_stop()
                break
        video_capture.stop()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    main = Main()
    main.welcome_message()
    main.streamingThread()

    @app.route("/video_feed")
    def video_feed():
        # Return the generated response along with the specific media
        # type (MIME type)
        return Response(main.generate(),
                        mimetype="multipart/x-mixed-replace; boundary=frame")

    main.currentclient = main.connectwithmqtt(address="localhost", targetport=1883)
    main.face_recognition(mqttc=main.currentclient)
const mqtt = require("mqtt");
const ws = require("ws");

// WebSocket server that browsers (the smiley page) connect to.
let socketserver = new ws.WebSocketServer({
    port: 443
});

// MQTT client that subscribes to the analysis results.
let client = mqtt.connect("mqtt://localhost:1883", {
    username: "standardUser",
    password: "GreatHHZ4Ever!"
});

client.on("connect", function() {
    let topicID = "Topic1";
    client.subscribe(topicID);
});

socketserver.on('connection', socket => {
    console.log("new client connected");
    socket.on('close', () => console.log('Client has disconnected!'));
});

// Forward every MQTT message to all connected WebSocket clients.
client.on("message", function(topic, message) {
    socketserver.clients.forEach(client => {
        client.send(message.toString());
    });
});
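To exercise the smiley page without the camera pipeline, a minimal publisher sketch could look like this (assuming the broker from the compose file is running on localhost:1883 with the credentials above):

import json
import paho.mqtt.publish as publish

# Publish a fake analysis result; the smiley page should then switch to ":-)".
# The age/gender values are just sample data for the test message.
publish.single(
    "Topic1",
    json.dumps({"emotion": "happy", "age": 30, "gender": "Man"}),
    hostname="localhost",
    port=1883,
    auth={"username": "standardUser", "password": "GreatHHZ4Ever!"},
)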