diff --git a/main.py b/main.py
index d267dd21afa0f46e5a11c1d703cd4710934afdc1..54400b4c97a6a4920c3da4b0821a28dd06eb7915 100644
--- a/main.py
+++ b/main.py
@@ -1,4 +1,5 @@
 import os
+import textwrap
 from turtle import position
 
 import warnings
@@ -109,6 +110,7 @@ class Main:
         self.flag = False
         self.selection = 3
         self.it = 0
+        self.text = None  # latest ChatGPT reply; drawn onto the video frame once available
 
 
 
@@ -163,6 +165,7 @@ class Main:
             generated_text = result['openai']['generated_text']
             print(generated_text)
             self.speak_in_thread(generated_text)
+            self.text = generated_text  # keep the reply so the video loop can render it on the frame
         except Exception as e:
             print(f"Fehler beim Senden/Empfangen der Daten zu/von ChatGPT: {e}")
 
@@ -216,7 +219,11 @@
                     cv2.putText(frame, f'Approx. Age: {analyze.get("age", "N/A")}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
                     cv2.putText(frame, f'Approx. Gender: {analyze.get("dominant_gender", "N/A")}', (x, y - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
                     cv2.putText(frame, f'Current emotion: {analyze.get("dominant_emotion", "N/A")}', (x, y - 70), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 0), 2)
-
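+                    # Draw the latest ChatGPT reply onto the frame, wrapped to 35-character lines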
+                    if self.text is not None:
+                        new_text = textwrap.fill(self.text, width=35)
+                        for i, line in enumerate(new_text.split('\n')):
+                            cv2.putText(frame, line, (50, 900 + i * 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
                     emotion = analyze.get("dominant_emotion", "N/A")
                     if emotion in emoji_images: