import gradio as gr
from ultralytics import YOLO
from typing import List
import time

model = YOLO("yolo11n.pt")
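# NOTE: the YOLO model above is loaded but never used in this debug interface;
# it is presumably kept only to reproduce the original app's startup behavior
# (model download/load cost).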


def create_chat_interface():
    """Create a minimal chat interface that mirrors the original structure."""
    with gr.Blocks() as demo:
        # Match the original chatbot structure
        chatbot = gr.Chatbot(
            show_label=False,
        )
        msg = gr.Textbox(
            show_label=False,
            placeholder="Type your message here...",
        )

        # Keep all the original state objects
        transcript_processor_state = gr.State()
        call_id_state = gr.State()
        colab_id_state = gr.State()
        origin_state = gr.State()
        ct_state = gr.State()
        turl_state = gr.State()
        uid_state = gr.State()
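        # gr.State values start as None; they are filled in per session by
        # on_app_load via the demo.load chain below.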

        # Keep the streaming functionality
        def respond(
            message: str,
            chat_history: List,
            transcript_processor,
            cid,
            rsid,
            origin,
            ct,
            uid,
        ):
            if not transcript_processor:
                bot_message = "Transcript processor not initialized."
                chat_history.append((message, bot_message))
                # respond is a generator, so the early exit must yield;
                # a plain `return value` inside a generator ends the stream
                # without ever updating the outputs.
                yield "", chat_history
                return
            chat_history.append((message, ""))

            # Simulate streaming with a simple loop
            for i in range(5):
                partial_response = f"Processing... {i + 1}/5"
                chat_history[-1] = (message, partial_response)
                yield "", chat_history
                time.sleep(0.3)

            # Final response
            final_response = f"Processed message: {message}\nWith call_id: {cid}"
            chat_history[-1] = (message, final_response)
            yield "", chat_history

        # Keep the exact same function call structure
        msg.submit(
            respond,
            [
                msg,
                chatbot,
                transcript_processor_state,
                call_id_state,
                colab_id_state,
                origin_state,
                ct_state,
                uid_state,
            ],
            [msg, chatbot],
        )
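        # Because respond is a generator, each yield of ("", chat_history)
        # streams an incremental update to [msg, chatbot] rather than
        # returning a single final value.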

        # Match the original on_app_load function
        def on_app_load(request: gr.Request):
            # Simplified parameter handling
            cid = "test_cid"
            rsid = "test_rsid"
            origin = "test_origin"
            ct = "test_ct"
            turl = "test_turl"
            uid = "test_uid"

            # Create a dummy transcript processor
            transcript_processor = {"initialized": True}

            # Initialize with welcome message
            chatbot_value = [(None, "Welcome to the debug interface")]
            return [
                chatbot_value,
                transcript_processor,
                cid,
                rsid,
                origin,
                ct,
                turl,
                uid,
            ]

        def display_processing_message(chatbot_value):
            """Display the processing message while maintaining state."""
            # Create new chatbot value with processing message
            new_chatbot_value = [(None, "Processing... Please wait...")]
            return new_chatbot_value

        def stream_initial_analysis(
            chatbot_value, transcript_processor, cid, rsid, origin, ct, uid
        ):
            if not transcript_processor:
                # Generator: yield the unchanged value instead of returning
                # it, otherwise the chatbot output is never updated.
                yield chatbot_value
                return

            # Simulate streaming with a simple loop
            for i in range(3):
                # Update the existing message
                chatbot_value[0] = (None, f"Initial analysis step {i + 1}/3...")
                yield chatbot_value
                time.sleep(0.5)

            # Final message
            chatbot_value[0] = (None, "Ready to chat! Call ID: " + cid)
            yield chatbot_value

        # Keep the exact same load chain
        demo.load(
            on_app_load,
            inputs=None,
            outputs=[
                chatbot,
                transcript_processor_state,
                call_id_state,
                colab_id_state,
                origin_state,
                ct_state,
                turl_state,
                uid_state,
            ],
        ).then(
            display_processing_message,
            inputs=[chatbot],
            outputs=[chatbot],
        ).then(
            stream_initial_analysis,
            inputs=[
                chatbot,
                transcript_processor_state,
                call_id_state,
                colab_id_state,
                origin_state,
                ct_state,
                uid_state,
            ],
            outputs=[chatbot],
        )

    return demo
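

# Note: streaming from generator functions relies on Gradio's queue. Gradio 4+
# enables the queue by default; on Gradio 3.x you would need app.queue()
# before launch() for the yields above to stream incrementally.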
# Launch the application
if __name__ == "__main__":
    app = create_chat_interface()
    app.launch()