# -*- coding: utf-8 -*-
import streamlit as st
import os
import asyncio
import concurrent.futures
import base64
import io
import threading
import traceback
import atexit
import time
import logging
from dotenv import load_dotenv
import cv2 # For image processing
import pyaudio # For audio PLAYBACK
import PIL.Image
from google import genai
from google.genai import types
# streamlit-webrtc components
from streamlit_webrtc import (
webrtc_streamer,
WebRtcMode,
AudioProcessorBase,
VideoProcessorBase,
)
# from aiortc import RTCIceServer, RTCConfiguration # Not needed directly
# --- Configuration ---
load_dotenv()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')
logging.info("Application starting up...")
# Audio configuration
PYAUDIO_FORMAT = pyaudio.paInt16
PYAUDIO_CHANNELS = 1
WEBRTC_REQUESTED_AUDIO_CHANNELS = 1
WEBRTC_REQUESTED_SEND_SAMPLE_RATE = 16000
GEMINI_AUDIO_RECEIVE_SAMPLE_RATE = 24000
PYAUDIO_PLAYBACK_CHUNK_SIZE = 1024
AUDIO_PLAYBACK_QUEUE_MAXSIZE = 50
MEDIA_TO_GEMINI_QUEUE_MAXSIZE = 30
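# Note on sample rates: the browser is asked to capture microphone audio at 16 kHz mono
# (WEBRTC_REQUESTED_SEND_SAMPLE_RATE), while audio returned by the Live API is played
# back at 24 kHz (GEMINI_AUDIO_RECEIVE_SAMPLE_RATE). The queues above are bounded so a
# slow consumer drops data rather than letting memory and latency grow without limit.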
# Video configuration
VIDEO_FPS_TO_GEMINI = 2
VIDEO_API_RESIZE = (1024, 1024)
# !!! IMPORTANT: Verify this model name is correct for the Live API !!!
# This is a common point of failure for ConnectionClosedError.
MODEL_NAME = "models/gemini-2.0-flash-live-001"
logging.info(f"Using Gemini Model: {MODEL_NAME}")
MEDICAL_ASSISTANT_SYSTEM_PROMPT = """You are an AI Medical Assistant. Your primary function is to analyze visual information from the user's camera or screen and respond via voice.
Your responsibilities are:
1. **Visual Observation and Description:** Carefully examine the images or video feed. Describe relevant details you observe.
2. **General Information (Non-Diagnostic):** Provide general information related to what is visually presented, if applicable. You are not a diagnostic tool.
3. **Safety and Disclaimer (CRITICAL):**
* You are an AI assistant, **NOT a medical doctor or a substitute for one.**
* **DO NOT provide medical diagnoses, treatment advice, or interpret medical results (e.g., X-rays, scans, lab reports).**
* When appropriate, and always if the user seems to be seeking diagnosis or treatment, explicitly state your limitations and **strongly advise the user to consult a qualified healthcare professional.**
* If you see something that *appears* visually concerning (e.g., an unusual skin lesion, signs of injury), you may gently suggest it might be wise to have it looked at by a professional, without speculating on what it is.
4. **Tone:** Maintain a helpful, empathetic, and calm tone.
5. **Interaction:** After this initial instruction, you can make a brief acknowledgment of your role (e.g., "I'm ready to assist by looking at what you show me. Please remember to consult a doctor for medical advice."). Then, focus on responding to the user's visual input and questions.
Example of a disclaimer you might use: "As an AI assistant, I can describe what I see, but I can't provide medical advice or diagnoses. For any health concerns, it's always best to speak with a doctor or other healthcare professional."
"""
# --- PyAudio Global Instance and Cleanup ---
pya = None
try:
pya = pyaudio.PyAudio()
def cleanup_pyaudio():
logging.info("Terminating PyAudio instance.")
if pya: pya.terminate()
atexit.register(cleanup_pyaudio)
logging.info("PyAudio initialized successfully.")
except Exception as e_pyaudio:
logging.warning(f"PyAudio initialization failed (expected in some server environments): {e_pyaudio}")
pya = None
# --- Global Queues - Declare as None, initialize later ---
video_frames_to_gemini_q: asyncio.Queue = None
audio_chunks_to_gemini_q: asyncio.Queue = None
audio_from_gemini_playback_q: asyncio.Queue = None
# --- Gemini Client Setup ---
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
client = None
if GEMINI_API_KEY:
try:
client = genai.Client(http_options={"api_version": "v1beta"}, api_key=GEMINI_API_KEY)
logging.info("Gemini client initialized successfully.")
except Exception as e:
st.error(f"Failed to initialize Gemini client: {e}")
logging.critical(f"Gemini client initialization failed: {e}", exc_info=True)
st.stop()
else:
st.error("GEMINI_API_KEY not found in environment variables. Please set it for the application to run.")
logging.critical("GEMINI_API_KEY not found.")
st.stop()
# Gemini LiveConnectConfig - HIGHLY SIMPLIFIED FOR DEBUGGING ConnectionClosedError
# Start with the absolute minimum. If this connects, incrementally add back features.
# If this still fails, the issue is likely MODEL_NAME or API Key/Project permissions.
LIVE_CONNECT_CONFIG = types.LiveConnectConfig(
    response_modalities=["audio"],  # Audio-only responses for this minimal config
    speech_config=types.SpeechConfig(
        voice_config=types.VoiceConfig(
            prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Puck")
        )
    ),
)
logging.info(f"Attempting connection with highly simplified LiveConnectConfig: {LIVE_CONNECT_CONFIG}")
# --- Backend Gemini Interaction Loop ---
class GeminiInteractionLoop:
def __init__(self):
self.gemini_session = None
self.async_event_loop = None
self.is_running = True
self.playback_stream = None
async def send_text_input_to_gemini(self, user_text):
if not user_text or not self.gemini_session or not self.is_running:
logging.warning("Cannot send text. Session not active, no text, or not running.")
return
try:
logging.info(f"Sending text to Gemini: '{user_text[:50]}...'")
await self.gemini_session.send(input=user_text, end_of_turn=True)
except Exception as e:
logging.error(f"Error sending text message to Gemini: {e}", exc_info=True)
async def stream_media_to_gemini(self):
logging.info("Task started: Stream media from WebRTC queues to Gemini.")
async def get_media_from_queues():
if video_frames_to_gemini_q is None or audio_chunks_to_gemini_q is None:
await asyncio.sleep(0.1); return None
try:
video_frame = await asyncio.wait_for(video_frames_to_gemini_q.get(), timeout=0.02)
if video_frame is None: return None # Sentinel received
video_frames_to_gemini_q.task_done(); return video_frame
except asyncio.TimeoutError: pass
except Exception as e: logging.error(f"Error getting video from queue: {e}", exc_info=True)
try:
audio_chunk = await asyncio.wait_for(audio_chunks_to_gemini_q.get(), timeout=0.02)
if audio_chunk is None: return None # Sentinel received
audio_chunks_to_gemini_q.task_done(); return audio_chunk
except asyncio.TimeoutError: return None
except Exception as e: logging.error(f"Error getting audio from queue: {e}", exc_info=True); return None
try:
while self.is_running:
if not self.gemini_session: await asyncio.sleep(0.1); continue
media_data = await get_media_from_queues()
if media_data is None and not self.is_running: break # Sentinel and stop signal
if media_data and self.gemini_session and self.is_running:
try: await self.gemini_session.send(input=media_data)
except Exception as e: logging.error(f"Error sending media chunk to Gemini: {e}", exc_info=True)
elif not media_data: await asyncio.sleep(0.05) # No data, yield
except asyncio.CancelledError: logging.info("Task cancelled: stream_media_to_gemini.")
finally: logging.info("Task finished: stream_media_to_gemini.")
async def process_gemini_responses(self):
logging.info("Task started: Process responses from Gemini.")
try:
while self.is_running:
if not self.gemini_session: await asyncio.sleep(0.1); continue
if audio_from_gemini_playback_q is None: await asyncio.sleep(0.1); continue
try:
turn_response = self.gemini_session.receive()
async for chunk in turn_response:
if not self.is_running: break
if audio_data := chunk.data:
if not audio_from_gemini_playback_q.full(): audio_from_gemini_playback_q.put_nowait(audio_data)
else: logging.warning("Audio playback queue full, discarding Gemini audio data.")
if text_response := chunk.text:
logging.info(f"Gemini text response: {text_response[:100]}")
if 'chat_messages' not in st.session_state: st.session_state.chat_messages = []
st.session_state.chat_messages = st.session_state.chat_messages + [{"role": "assistant", "content": text_response}]
# Consider using st.rerun() via a thread-safe mechanism if immediate UI update is critical
except types.generation_types.StopCandidateException: logging.info("Gemini response stream ended normally.")
except Exception as e:
if self.is_running: logging.error(f"Error receiving from Gemini: {e}", exc_info=True)
await asyncio.sleep(0.1)
except asyncio.CancelledError: logging.info("Task cancelled: process_gemini_responses.")
finally: logging.info("Task finished: process_gemini_responses.")
async def play_gemini_audio(self):
logging.info("Task started: Play Gemini audio responses.")
if pya is None:
logging.warning("PyAudio not available. Audio playback task will not run.")
return
try:
while audio_from_gemini_playback_q is None and self.is_running: await asyncio.sleep(0.1)
if not self.is_running: return
self.playback_stream = await asyncio.to_thread(
pya.open, format=PYAUDIO_FORMAT, channels=PYAUDIO_CHANNELS, rate=GEMINI_AUDIO_RECEIVE_SAMPLE_RATE, output=True, frames_per_buffer=PYAUDIO_PLAYBACK_CHUNK_SIZE
)
logging.info(f"PyAudio playback stream opened at {GEMINI_AUDIO_RECEIVE_SAMPLE_RATE} Hz.")
while self.is_running:
try:
audio_chunk = await asyncio.wait_for(audio_from_gemini_playback_q.get(), timeout=1.0)
if audio_chunk is None and not self.is_running: break # Sentinel and stop signal
if audio_chunk: await asyncio.to_thread(self.playback_stream.write, audio_chunk)
if audio_chunk: audio_from_gemini_playback_q.task_done()
except asyncio.TimeoutError: continue
except Exception as e: logging.error(f"Error playing audio chunk: {e}", exc_info=True); await asyncio.sleep(0.01)
except Exception as e:
logging.error(f"Failed to open or use PyAudio playback stream (might be expected in this environment): {e}", exc_info=True)
finally:
if self.playback_stream:
logging.info("Stopping and closing PyAudio playback stream.")
try:
await asyncio.to_thread(self.playback_stream.stop_stream)
await asyncio.to_thread(self.playback_stream.close)
except Exception as e_close: logging.error(f"Error closing playback stream: {e_close}", exc_info=True)
self.playback_stream = None
logging.info("Task finished: play_gemini_audio.")
def signal_stop(self):
logging.info("Signal to stop GeminiInteractionLoop received.")
self.is_running = False
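        # Drop a None sentinel into each queue so any consumer blocked on get() wakes
        # up and can observe is_running == False.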
for q_name, q_obj_ref in [("video_q", video_frames_to_gemini_q),
("audio_in_q", audio_chunks_to_gemini_q),
("audio_out_q", audio_from_gemini_playback_q)]:
if q_obj_ref:
try: q_obj_ref.put_nowait(None)
except asyncio.QueueFull: logging.warning(f"Queue {q_name} was full when trying to put sentinel for stop signal.")
except Exception as e: logging.error(f"Error putting sentinel in {q_name}: {e}", exc_info=True)
async def run_main_loop(self):
global video_frames_to_gemini_q, audio_chunks_to_gemini_q, audio_from_gemini_playback_q
self.async_event_loop = asyncio.get_running_loop()
self.is_running = True
logging.info("GeminiInteractionLoop run_main_loop starting...")
video_frames_to_gemini_q = asyncio.Queue(maxsize=MEDIA_TO_GEMINI_QUEUE_MAXSIZE)
audio_chunks_to_gemini_q = asyncio.Queue(maxsize=MEDIA_TO_GEMINI_QUEUE_MAXSIZE)
audio_from_gemini_playback_q = asyncio.Queue(maxsize=AUDIO_PLAYBACK_QUEUE_MAXSIZE)
logging.info("Asyncio queues initialized in GeminiInteractionLoop.")
if client is None: logging.critical("Gemini client is None in run_main_loop. Aborting."); return
try:
async with client.aio.live.connect(model=MODEL_NAME, config=LIVE_CONNECT_CONFIG) as session:
self.gemini_session = session
logging.info(f"Gemini session established with API for model {MODEL_NAME}.")
try:
logging.info("Sending system prompt to Gemini...")
await self.gemini_session.send(input=MEDICAL_ASSISTANT_SYSTEM_PROMPT, end_of_turn=False)
logging.info("System prompt sent successfully.")
except Exception as e:
logging.error(f"Failed to send system prompt: {e}", exc_info=True)
self.is_running = False; return
# Python 3.9 does not have asyncio.TaskGroup, so manage tasks individually
tasks = []
try:
logging.info("Creating async tasks for Gemini interaction...")
tasks.append(asyncio.create_task(self.stream_media_to_gemini(), name="stream_media_to_gemini"))
tasks.append(asyncio.create_task(self.process_gemini_responses(), name="process_gemini_responses"))
tasks.append(asyncio.create_task(self.play_gemini_audio(), name="play_gemini_audio"))
logging.info("All Gemini interaction tasks created.")
await asyncio.gather(*tasks) # Wait for all tasks to complete
except Exception as e_gather: # Catch errors from tasks gathered
logging.error(f"Error during asyncio.gather: {e_gather}", exc_info=True)
for task in tasks:
if not task.done(): task.cancel() # Cancel pending tasks
await asyncio.gather(*tasks, return_exceptions=True) # Wait for cancellations
logging.info("Gemini interaction tasks finished or cancelled.")
except asyncio.CancelledError: logging.info("GeminiInteractionLoop.run_main_loop() was cancelled.")
except Exception as e: # General catch-all, including ConnectionClosedError
logging.error(f"Exception in GeminiInteractionLoop run_main_loop: {type(e).__name__}: {e}", exc_info=True)
finally:
logging.info("GeminiInteractionLoop.run_main_loop() finishing...")
self.is_running = False # Ensure flag is set for all tasks
self.signal_stop() # Send sentinels again to be sure
self.gemini_session = None
# Clear global queues by setting them to None
video_frames_to_gemini_q = None
audio_chunks_to_gemini_q = None
audio_from_gemini_playback_q = None
logging.info("GeminiInteractionLoop finished and global queues set to None.")
# --- WebRTC Media Processors ---
class VideoProcessor(VideoProcessorBase):
def __init__(self):
self.frame_counter = 0
self.last_gemini_send_time = time.monotonic()
async def _process_and_queue_frame_async(self, frame_ndarray):
if video_frames_to_gemini_q is None: return
self.frame_counter += 1
current_time = time.monotonic()
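        # Throttle uploads to roughly VIDEO_FPS_TO_GEMINI frames per second; frames
        # arriving faster than that are simply skipped.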
if (current_time - self.last_gemini_send_time) < (1.0 / VIDEO_FPS_TO_GEMINI): return
self.last_gemini_send_time = current_time
try:
img_rgb = cv2.cvtColor(frame_ndarray, cv2.COLOR_BGR2RGB)
pil_img = PIL.Image.fromarray(img_rgb)
pil_img.thumbnail(VIDEO_API_RESIZE)
image_io = io.BytesIO()
pil_img.save(image_io, format="jpeg")
image_bytes = image_io.getvalue()
api_data = {"mime_type": "image/jpeg", "data": base64.b64encode(image_bytes).decode()}
if video_frames_to_gemini_q.full():
try: await asyncio.wait_for(video_frames_to_gemini_q.get(), timeout=0.01)
except asyncio.TimeoutError: logging.warning("Video queue full, frame dropped."); return
video_frames_to_gemini_q.put_nowait(api_data)
except Exception as e: logging.error(f"Error processing/queueing video frame: {e}", exc_info=True)
async def recv(self, frame):
img_bgr = frame.to_ndarray(format="bgr24")
try:
loop = asyncio.get_running_loop()
loop.create_task(self._process_and_queue_frame_async(img_bgr))
except RuntimeError: logging.error("VideoProcessor.recv: No running asyncio loop in current thread for create_task.")
return frame
class AudioProcessor(AudioProcessorBase):
async def _process_and_queue_audio_async(self, audio_frames):
if audio_chunks_to_gemini_q is None: return
for frame in audio_frames:
audio_data = frame.planes[0].to_bytes()
# Note: Ensure this mime_type and the actual audio data format (sample rate, channels, bit depth)
# are compatible with what the Gemini Live API expects for PCM audio.
mime_type = f"audio/L16;rate={frame.sample_rate};channels={frame.layout.channels}"
api_data = {"data": audio_data, "mime_type": mime_type}
try:
if audio_chunks_to_gemini_q.full():
try: await asyncio.wait_for(audio_chunks_to_gemini_q.get(), timeout=0.01)
except asyncio.TimeoutError: logging.warning("Audio queue full, chunk dropped."); continue
audio_chunks_to_gemini_q.put_nowait(api_data)
except Exception as e: logging.error(f"Error queueing audio chunk: {e}", exc_info=True)
async def recv(self, frames):
try:
loop = asyncio.get_running_loop()
loop.create_task(self._process_and_queue_audio_async(frames))
except RuntimeError: logging.error("AudioProcessor.recv: No running asyncio loop in current thread for create_task.")
return frames
# --- Streamlit UI and Application Logic ---
def initialize_app_session_state():
defaults = {
'gemini_session_active': False,
'gemini_loop_instance': None,
'chat_messages': [],
'webrtc_component_key': f"webrtc_streamer_key_{int(time.time())}",
}
for key, value in defaults.items():
if key not in st.session_state:
st.session_state[key] = value
def run_streamlit_app():
st.set_page_config(page_title="Live AI Medical Assistant (HF Spaces)", layout="wide")
initialize_app_session_state()
st.title("Live AI Medical Assistant")
st.markdown("Utilizing Gemini Live API via WebRTC on Hugging Face Spaces")
st.info("Remember: This AI cannot provide medical diagnoses. Always consult a healthcare professional for medical advice.")
with st.sidebar:
st.header("Session Control")
if not st.session_state.gemini_session_active:
if st.button("π Start Session", type="primary", use_container_width=True, key="start_session_btn"):
st.session_state.gemini_session_active = True
st.session_state.chat_messages = [{"role": "system", "content": "Assistant activating. Please allow camera/microphone access in your browser if prompted."}]
gemini_loop = GeminiInteractionLoop()
st.session_state.gemini_loop_instance = gemini_loop
threading.Thread(target=lambda: asyncio.run(gemini_loop.run_main_loop()), name="GeminiLoopThread", daemon=True).start()
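                # The interaction loop runs in its own daemon thread with its own
                # asyncio event loop (via asyncio.run); the UI later submits coroutines
                # to it using asyncio.run_coroutine_threadsafe.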
st.success("Gemini session starting... WebRTC will attempt to connect.")
st.session_state.webrtc_component_key = f"webrtc_streamer_key_{int(time.time())}"
st.rerun()
else:
if st.button("π Stop Session", type="secondary", use_container_width=True, key="stop_session_btn"):
if st.session_state.gemini_loop_instance:
st.session_state.gemini_loop_instance.signal_stop()
st.session_state.gemini_loop_instance = None
st.session_state.gemini_session_active = False
st.warning("Session stopping...")
time.sleep(0.5)
st.rerun()
if st.session_state.gemini_session_active:
st.subheader("Your Live Feed (from your browser)")
MEDIA_STREAM_CONSTRAINTS = {
"video": True,
"audio": {
"sampleRate": {"ideal": WEBRTC_REQUESTED_SEND_SAMPLE_RATE},
"channelCount": {"exact": WEBRTC_REQUESTED_AUDIO_CHANNELS},
"echoCancellation": True,
"noiseSuppression": True
}
}
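        # These constraints request 16 kHz mono capture with echo cancellation and
        # noise suppression; browsers treat "ideal" values as hints, so the sample
        # rate actually delivered over WebRTC may differ.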
webrtc_ctx = webrtc_streamer(
key=st.session_state.webrtc_component_key,
mode=WebRtcMode.SENDONLY,
rtc_configuration={
"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
},
media_stream_constraints=MEDIA_STREAM_CONSTRAINTS,
video_processor_factory=VideoProcessor,
audio_processor_factory=AudioProcessor,
async_processing=True,
)
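        # SENDONLY mode: the browser only sends camera/microphone media to the server.
        # Gemini's audio is not returned over WebRTC; it is played server-side by the
        # PyAudio playback task (which may be unavailable on hosted environments).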
if webrtc_ctx.state.playing:
st.caption("WebRTC connected. Streaming your camera and microphone.")
elif st.session_state.gemini_session_active: # Check if session is supposed to be active
st.caption("WebRTC attempting to connect. Ensure camera/microphone permissions are granted in your browser.")
if hasattr(webrtc_ctx.state, 'error') and webrtc_ctx.state.error:
st.error(f"WebRTC Connection Error: {webrtc_ctx.state.error}")
else:
st.info("Click 'Start Session' in the sidebar to enable the live feed and assistant.")
st.subheader("Chat with Assistant")
chat_container = st.container()
with chat_container:
messages = st.session_state.get('chat_messages', [])
for msg in messages:
with st.chat_message(msg["role"]):
st.write(msg["content"])
user_chat_input = st.chat_input(
"Type your message...",
key="user_chat_input_box",
disabled=not st.session_state.gemini_session_active
)
if user_chat_input:
current_messages = st.session_state.get('chat_messages', [])
current_messages.append({"role": "user", "content": user_chat_input})
st.session_state.chat_messages = current_messages
loop_instance = st.session_state.get('gemini_loop_instance')
if loop_instance and loop_instance.async_event_loop and loop_instance.gemini_session:
if loop_instance.async_event_loop.is_running():
future = asyncio.run_coroutine_threadsafe(
loop_instance.send_text_input_to_gemini(user_chat_input),
loop_instance.async_event_loop
)
try: future.result(timeout=2)
                except concurrent.futures.TimeoutError: logging.warning("Timed out waiting for send_text_input_to_gemini confirmation.")
except Exception as e: logging.error(f"Error calling send_text_input_to_gemini: {e}", exc_info=True)
else: st.error("Session event loop is not running. Cannot send message.")
elif not loop_instance or not st.session_state.gemini_session_active:
st.error("Session is not active. Please start a session to send messages.")
else: st.warning("Session components not fully ready. Please wait a moment.")
st.rerun()
if __name__ == "__main__":
if client is None:
logging.critical("Gemini client could not be initialized. Application cannot start.")
else:
        run_streamlit_app()