# Stella — motion-activated voice assistant (OpenCV motion trigger, speech I/O, OpenAI chat, pygame avatar).
import os
# Clear the terminal on startup (Windows 'cls' vs. POSIX 'clear').
os.system('cls' if os.name == 'nt' else 'clear')
import threading
import openai
import speech_recognition as sr
import pyttsx3
import pygame
from PIL import Image, ImageSequence
import cv2
import time
import sys
from dotenv import load_dotenv
# Load OPENAI_API_KEY from a local .env file into the environment.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def detect_motion(threshold=5000, warmup_time=2, max_attempts=500):
    """Watch the default camera and report whether motion is observed.

    Args:
        threshold: minimum summed foreground-contour area (px²) that counts as motion.
        warmup_time: seconds to wait after opening the camera so exposure settles.
        max_attempts: maximum number of frames to inspect before giving up.

    Returns:
        True if motion was detected within ``max_attempts`` frames, else False.
    """
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("❌ Δεν ήταν δυνατή η πρόσβαση στην κάμερα.")
        return False
    motion_detected = False
    try:
        print("🔄 Προετοιμασία κάμερας...")
        time.sleep(warmup_time)
        backSub = cv2.createBackgroundSubtractorMOG2()
        print("🔍 Ανίχνευση κίνησης σε εξέλιξη...")
        for _ in range(max_attempts):
            ret, frame = cap.read()
            if not ret:
                print("❌ Απώλεια σύνδεσης με την κάμερα.")
                break
            fgMask = backSub.apply(frame)
            contours, _ = cv2.findContours(fgMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Compute each contour's area once; ignore tiny specks (<= 100 px²) as noise.
            areas = [cv2.contourArea(c) for c in contours]
            total_area = sum(a for a in areas if a > 100)
            if total_area > threshold:
                print("🚨 Ανιχνεύτηκε κίνηση!")
                motion_detected = True
                break
    finally:
        # Always release the camera, even if OpenCV raises mid-loop.
        cap.release()
        cv2.destroyAllWindows()
    if not motion_detected:
        print("⏳ Δεν ανιχνεύθηκε κίνηση εντός χρόνου.")
    return motion_detected
# --- Pygame / avatar globals ---
pygame.init()
pygame.font.init()
# Animated avatar source image (multi-frame WEBP). Hard-coded user path.
image_path = r"C:\Users\tsifa\Desktop\avatar 111.webp"
# Filled lazily by show_avatar() via create_avatar_animation().
avatar_frames = []
# Caption drawn under the avatar; mutated by update_avatar_text() from the conversation thread.
avatar_text = "Hello! How can I help you? My name is Stella 😊"
def create_avatar_animation(image_path):
    """Load every frame of the animated image at *image_path*.

    Returns:
        A list of pygame RGBA surfaces, one per animation frame.

    Exits the process if the file does not exist.
    """
    if not os.path.exists(image_path):
        print(f"❌ File {image_path} not found!")
        sys.exit()
    # Use a context manager so the file handle is closed once frames are decoded
    # (the original leaked the PIL image handle).
    with Image.open(image_path) as gif:
        frames = [
            pygame.image.fromstring(frame.convert("RGBA").tobytes(), frame.size, "RGBA")
            for frame in ImageSequence.Iterator(gif)
        ]
    return frames
def show_avatar(running_check):
    """Run the pygame window loop: animate the avatar and draw `avatar_text`.

    Blocks until running_check() returns False or the window is closed.

    Args:
        running_check: zero-arg callable polled once per frame; the loop
            exits when it returns a falsy value.
    """
    global avatar_frames
    avatar_frames = create_avatar_animation(image_path)
    frame_index = 0
    screen_width, screen_height = 700, 700
    screen = pygame.display.set_mode((screen_width, screen_height))
    pygame.display.set_caption("Stella")
    font = pygame.font.SysFont("Calibri", 20, bold=True)
    clock = pygame.time.Clock()
    background_color = (30, 30, 30)
    def render_text_wrapped(text, font, color, max_width):
        # Greedy word-wrap: pack words onto a line until rendering would exceed max_width.
        words = text.split(' ')
        lines = []
        current_line = ''
        for word in words:
            test_line = current_line + word + ' '
            if font.size(test_line)[0] <= max_width:
                current_line = test_line
            else:
                lines.append(current_line)
                current_line = word + ' '
        lines.append(current_line)
        surfaces = [font.render(line.strip(), True, color) for line in lines]
        return surfaces
    while running_check():
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): sys.exit() here ends the main thread only; the
                # conversation thread may still be running — confirm intended.
                pygame.quit()
                sys.exit()
        screen.fill(background_color)
        current_frame = avatar_frames[frame_index]
        avatar_rect = current_frame.get_rect(center=(screen_width // 2, screen_height // 2 - 30))
        screen.blit(current_frame, avatar_rect)
        frame_index = (frame_index + 1) % len(avatar_frames)
        # avatar_text is a module-level global mutated by the conversation thread,
        # so the caption updates live between frames.
        text_surfaces = render_text_wrapped(avatar_text, font, (200, 200, 200), screen_width - 40)
        text_y = screen_height - (len(text_surfaces) * 25) - 20
        for surface in text_surfaces:
            text_rect = surface.get_rect(center=(screen_width // 2, text_y))
            screen.blit(surface, text_rect)
            text_y += 25
        pygame.display.flip()
        clock.tick(20)  # cap the loop at 20 frames per second
def update_avatar_text(new_text):
    """Replace the caption shown beneath the avatar with *new_text*."""
    global avatar_text
    avatar_text = new_text
# --- Speech recognition / text-to-speech globals ---
recognizer = sr.Recognizer()
microphone = sr.Microphone()
engine = pyttsx3.init()
# Prefer a female-sounding voice (e.g. Microsoft Zira on Windows) when available;
# fall back to the engine default otherwise.
voices = engine.getProperty('voices')
for voice in voices:
    if 'female' in voice.name.lower() or 'zira' in voice.name.lower():
        engine.setProperty('voice', voice.id)
        break
def speak(text):
    """Speak *text* aloud through the shared pyttsx3 engine (blocks until done)."""
    engine.say(text)
    engine.runAndWait()
def listen():
    """Capture one utterance from the microphone and transcribe it via Google.

    Returns:
        The recognized English text, or None when no speech started within
        the timeout, nothing intelligible was heard, or the service failed.
    """
    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)
        print("🎤 Μπορείς να μιλήσεις τώρα...")
        try:
            audio = recognizer.listen(source, timeout=5, phrase_time_limit=8)
            return recognizer.recognize_google(audio, language="en-US")
        except sr.WaitTimeoutError:
            # No speech began within the 5 s timeout; previously this fell into
            # the generic "unknown error" branch. Let the caller simply retry.
            return None
        except sr.UnknownValueError:
            print("🤷 Δεν κατάλαβα, μπορείς να επαναλάβεις;")
            return None
        except sr.RequestError as e:
            print(f"❌ Σφάλμα αναγνώρισης φωνής: {e}")
            return None
        except Exception as e:
            print(f"⚠️ Άγνωστο σφάλμα: {e}")
            return None
def truncate_to_two_sentences(text):
    """Trim *text* to at most its first two '.'-delimited sentences.

    Fix over the original: a terminal '.' is appended only when the result is
    non-empty and does not already end with sentence-final punctuation, so
    "Hello!" stays "Hello!" instead of becoming "Hello!.".
    """
    sentences = text.split(".")
    truncated = ".".join(sentences[:2]).strip()
    # Append '.' only when the text lacks terminal punctuation.
    if truncated and not truncated.endswith((".", "!", "?")):
        truncated += "."
    return truncated
def conversation_loop(update_avatar_text, running_check):
    """Voice-chat loop: listen, query the model, then display and speak replies.

    Args:
        update_avatar_text: callable that sets the caption under the avatar.
        running_check: zero-arg callable; the loop exits when it returns False.

    Sets the module-level ``running`` flag to False when the user says
    "quit", "exit" or "stop".
    """
    global running
    conversation_context = []  # NOTE(review): unused — history is never sent to the model.
    initial_message = "Hello! How can I help you today?"
    update_avatar_text(initial_message)
    speak(initial_message)
    time.sleep(0.5)
    while running_check():
        user_input = listen()
        if user_input is None:
            continue
        if user_input.lower() in ["quit", "exit", "stop"]:
            running = False
            break
        try:
            response = openai.chat.completions.create(
                model="gpt-4-turbo-preview",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant. Please respond in no more than two concise sentences."},
                    {"role": "user", "content": user_input}
                ]
            )
        except Exception as e:
            # Network/API failures must not kill the background thread; report and retry.
            print(f"⚠️ Σφάλμα OpenAI API: {e}")
            continue
        ai_response = response.choices[0].message.content
        ai_response = truncate_to_two_sentences(ai_response)
        print(f"User: {user_input}")
        print(f"Stella: {ai_response}")
        update_avatar_text(ai_response)
        speak(ai_response)
        time.sleep(0.5)
# Shared shutdown flag polled by both the avatar loop and the conversation thread.
running = True
if __name__ == "__main__":
    print("🚀 Εκκίνηση ανίχνευσης κίνησης...")
    # Only wake the assistant when motion is seen in front of the camera.
    if detect_motion():
        print("🎬 Έναρξη avatar και AI...")
        # Conversation runs in a background thread; pygame must own the main thread.
        conversation_thread = threading.Thread(target=conversation_loop, args=(update_avatar_text, lambda: running))
        conversation_thread.start()
        show_avatar(lambda: running)
        conversation_thread.join()
    else:
        print("❌ Δεν ανιχνεύθηκε κίνηση, τερματισμός...")