AhmadMustafa committed
Commit c5f736c · verified · 1 Parent(s): 1336ceb

Update app.py

Files changed (1)
  1. app.py +134 -16
app.py CHANGED
@@ -1,42 +1,160 @@
 import gradio as gr
 from ultralytics import YOLO
+from typing import List
+import time
 
 model = YOLO("yolo11n.pt")
 
-def create_minimal_chat_interface():
-    """Create a stripped-down minimal chat interface for debugging."""
+
+def create_chat_interface():
+    """Create a minimal chat interface that mirrors the original structure."""
 
     with gr.Blocks() as demo:
-        # Basic chatbot with no additional parameters
+        # Match the original chatbot structure
        chatbot = gr.Chatbot(
            show_label=False,
        )
 
-        # Simple textbox for input
        msg = gr.Textbox(
            show_label=False,
            placeholder="Type your message here..."
        )
 
-        def respond(message, chat_history):
-            """Simple echo function for testing."""
-            chat_history.append((message, f"You said: {message}"))
-            return "", chat_history
+        # Keep all the original state objects
+        transcript_processor_state = gr.State()
+        call_id_state = gr.State()
+        colab_id_state = gr.State()
+        origin_state = gr.State()
+        ct_state = gr.State()
+        turl_state = gr.State()
+        uid_state = gr.State()
+
+        # Keep the streaming functionality
+        def respond(
+            message: str,
+            chat_history: List,
+            transcript_processor,
+            cid,
+            rsid,
+            origin,
+            ct,
+            uid,
+        ):
+            if not transcript_processor:
+                bot_message = "Transcript processor not initialized."
+                chat_history.append((message, bot_message))
+                return "", chat_history
+
+            chat_history.append((message, ""))
+            # Simulate streaming with a simple loop
+            for i in range(5):
+                partial_response = f"Processing... {i+1}/5"
+                chat_history[-1] = (message, partial_response)
+                yield "", chat_history
+                time.sleep(0.3)
+
+            # Final response
+            final_response = f"Processed message: {message}\nWith call_id: {cid}"
+            chat_history[-1] = (message, final_response)
+            yield "", chat_history
 
+        # Keep the exact same function call structure
        msg.submit(
            respond,
-            [msg, chatbot],
+            [
+                msg,
+                chatbot,
+                transcript_processor_state,
+                call_id_state,
+                colab_id_state,
+                origin_state,
+                ct_state,
+                uid_state,
+            ],
            [msg, chatbot],
        )
 
-        # Simple load function with no request parameter
-        def on_load():
-            chatbot_value = [(None, "Welcome! This is a minimal test interface.")]
-            return chatbot_value
+        # Match the original on_app_load function
+        def on_app_load(request: gr.Request):
+            # Simplified parameter handling
+            cid = "test_cid"
+            rsid = "test_rsid"
+            origin = "test_origin"
+            ct = "test_ct"
+            turl = "test_turl"
+            uid = "test_uid"
+
+            # Create a dummy transcript processor
+            transcript_processor = {"initialized": True}
+
+            # Initialize with welcome message
+            chatbot_value = [(None, "Welcome to the debug interface")]
+
+            return [
+                chatbot_value,
+                transcript_processor,
+                cid,
+                rsid,
+                origin,
+                ct,
+                turl,
+                uid,
+            ]
 
+        def display_processing_message(chatbot_value):
+            """Display the processing message while maintaining state."""
+            # Create new chatbot value with processing message
+            new_chatbot_value = [
+                (None, "Processing... Please wait...")
+            ]
+            return new_chatbot_value
+
+        def stream_initial_analysis(
+            chatbot_value, transcript_processor, cid, rsid, origin, ct, uid
+        ):
+            if not transcript_processor:
+                return chatbot_value
+
+            # Simulate streaming with a simple loop
+            for i in range(3):
+                # Update the existing message
+                chatbot_value[0] = (None, f"Initial analysis step {i+1}/3...")
+                yield chatbot_value
+                time.sleep(0.5)
+
+            # Final message
+            chatbot_value[0] = (None, "Ready to chat! Call ID: " + cid)
+            yield chatbot_value
+
+        # Keep the exact same load chain
        demo.load(
-            on_load,
+            on_app_load,
            inputs=None,
+            outputs=[
+                chatbot,
+                transcript_processor_state,
+                call_id_state,
+                colab_id_state,
+                origin_state,
+                ct_state,
+                turl_state,
+                uid_state,
+            ],
+        ).then(
+            display_processing_message,
+            inputs=[chatbot],
+            outputs=[chatbot],
+        ).then(
+            stream_initial_analysis,
+            inputs=[
+                chatbot,
+                transcript_processor_state,
+                call_id_state,
+                colab_id_state,
+                origin_state,
+                ct_state,
+                uid_state,
+            ],
            outputs=[chatbot],
        )
 
@@ -44,5 +162,5 @@ def create_minimal_chat_interface():
 
 # Launch the application
 if __name__ == "__main__":
-    app = create_minimal_chat_interface()
-    app.launch()
+    app = create_chat_interface()
+    app.launch()
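
For context on the two Gradio patterns this change leans on, generator handlers that stream partial chatbot updates via yield, and a demo.load(...).then(...) chain that runs setup steps in sequence, here is a minimal standalone sketch. It is not part of the commit; the names (call_id_state, on_app_load, initial_analysis) are illustrative, and it assumes a Gradio version that still accepts the tuple-style chat history used in app.py.

# Standalone sketch (not part of this commit): generator streaming plus a
# chained load event, assuming a Gradio version that accepts tuple-style history.
import time
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(show_label=False)
    msg = gr.Textbox(show_label=False)
    call_id_state = gr.State()  # per-session value, filled on load

    def respond(message, history, cid):
        # Generator handler: each yield pushes a partial update to the UI.
        history = history + [(message, "")]
        for i in range(3):
            history[-1] = (message, f"step {i + 1}/3 (call_id={cid})")
            yield "", history
            time.sleep(0.2)

    def on_app_load(request: gr.Request):
        # First link of the load chain: seed the chatbot and the state.
        return [(None, "Welcome")], "demo_call_id"

    def initial_analysis(history, cid):
        # Runs only after on_app_load has finished, because of .then() below.
        history[0] = (None, f"Ready. Call ID: {cid}")
        yield history

    msg.submit(respond, [msg, chatbot, call_id_state], [msg, chatbot])

    # .then() chains a follow-up step that starts once the previous one completes.
    demo.load(on_app_load, inputs=None, outputs=[chatbot, call_id_state]).then(
        initial_analysis, inputs=[chatbot, call_id_state], outputs=[chatbot]
    )

if __name__ == "__main__":
    demo.launch()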