IAMTFRMZA committed
Commit d13b654 · verified · Parent: f310bae

Update app.py

Files changed (1): app.py (+22, −74)
app.py CHANGED
@@ -1,3 +1,4 @@
+# top of the file
 import gradio as gr
 import os, time, re, json, base64, asyncio, threading, uuid, io
 import numpy as np
@@ -17,6 +18,7 @@ HEADERS = {"Authorization": f"Bearer {OPENAI_API_KEY}", "OpenAI-Beta": "realtime
 WS_URI = "wss://api.openai.com/v1/realtime?intent=transcription"
 connections = {}
 
+# WebSocket Client
 class WebSocketClient:
     def __init__(self, uri, headers, client_id):
         self.uri = uri
@@ -69,6 +71,7 @@ class WebSocketClient:
         if data["type"] == "conversation.item.input_audio_transcription.delta":
             self.transcript += data["delta"]
 
+# Real-time transcription connection manager
 def create_ws():
     cid = str(uuid.uuid4())
     client = WebSocketClient(WS_URI, HEADERS, cid)
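
For context, only two lines of WebSocketClient's receive handling appear in the hunk above; the rest of the class body is elided from this diff. A receive loop around that delta handler typically looks like the sketch below. This is an illustration only: the class name, `__init__`, and `self.ws` are assumptions, and only the two `transcript` lines come from the diff.

    import json

    class WebSocketClientSketch:
        # Sketch only; stands in for the real WebSocketClient in app.py.
        def __init__(self):
            self.transcript = ""
            self.ws = None  # assumed: an open connection from the `websockets` package

        async def receive_messages(self):
            # Each incoming message is one JSON event from the realtime API.
            async for message in self.ws:
                data = json.loads(message)
                if data["type"] == "conversation.item.input_audio_transcription.delta":
                    self.transcript += data["delta"]  # accumulate partial transcript text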
@@ -80,16 +83,15 @@ def send_audio(chunk, cid):
     if not cid or cid not in connections:
         return "Connecting..."
     sr, arr = chunk
-    if len(connections[cid].transcript) > 1000:
-        connections[cid].transcript = ""
     connections[cid].enqueue_audio_chunk(sr, arr)
-    return connections[cid].transcript.strip()
+    return connections[cid].transcript
 
 def clear_transcript(cid):
     if cid in connections:
         connections[cid].transcript = ""
     return ""
 
+# ============ Chat Assistant ============
 def handle_chat(user_input, history, thread_id, image_url):
     if not OPENAI_API_KEY or not ASSISTANT_ID:
         return "❌ Missing secrets!", history, thread_id, image_url
@@ -111,8 +113,7 @@ def handle_chat(user_input, history, thread_id, image_url):
         for msg in reversed(msgs.data):
             if msg.role == "assistant":
                 content = msg.content[0].text.value
-                history.append({"role": "user", "content": user_input})
-                history.append({"role": "assistant", "content": content})
+                history.append((user_input, content))
                 match = re.search(
                     r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
                     content
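
This append change pairs with the Chatbot change further down: the removed lines built openai-style message dicts, which is the shape `gr.Chatbot(type="messages")` expects, while the new single append builds the `(user, assistant)` pair that a plain `gr.Chatbot` (no `type=` argument) expects by default in the Gradio 4.x line. For reference, the two shapes, with illustrative strings:

    # "messages" format, matching the old gr.Chatbot(type="messages"):
    history = [
        {"role": "user", "content": "Show page 3"},
        {"role": "assistant", "content": "Here is page 3 ..."},
    ]

    # default pair format, matching the new plain gr.Chatbot:
    history = [
        ("Show page 3", "Here is page 3 ..."),
    ]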
@@ -125,98 +126,45 @@ def handle_chat(user_input, history, thread_id, image_url):
     except Exception as e:
         return f"❌ {e}", history, thread_id, image_url
 
-def send_transcript_to_assistant(transcript, history, thread_id, image_url, cid):
-    if not transcript.strip():
-        return gr.update(), history, thread_id, image_url
-    if cid in connections:
-        connections[cid].transcript = ""
-    return handle_chat(transcript, history, thread_id, image_url)
-
-def clear_chat_and_transcript(client_id):
-    if client_id in connections:
-        connections[client_id].transcript = ""
-    return [], "", None, None
-
-# Fix image viewer fallback
-def update_image_display(image_url):
-    if image_url and isinstance(image_url, str) and image_url.startswith("http"):
-        return image_url
-    return None
-
-# UI
+# ============ Gradio UI ============
 with gr.Blocks(theme=gr.themes.Soft()) as app:
     gr.Markdown("# 📄 Document AI Assistant")
 
-    gr.HTML("""
-    <style>
-    #ask-btn, #clear-chat-btn, #record-audio button {
-        font-size: 16px !important;
-        padding: 12px 28px !important;
-        border-radius: 6px;
-        margin-top: 10px;
-        background-color: #4b5563 !important;
-        color: white !important;
-        border: 1px solid #9ca3af !important;
-    }
-
-    #ask-btn:hover, #clear-chat-btn:hover, #record-audio button:hover {
-        background-color: #6b7280 !important;
-        color: #fff !important;
-    }
-
-    button {
-        margin-right: 8px;
-    }
-
-    #record-audio button svg {
-        margin-right: 6px;
-    }
-
-    #record-audio label {
-        display: none;
-    }
-    </style>
-    """)
-
     chat_state = gr.State([])
     thread_state = gr.State()
     image_state = gr.State()
     client_id = gr.State()
+    voice_enabled = gr.State(False)
 
     with gr.Row(equal_height=True):
         with gr.Column(scale=1):
             image_display = gr.Image(label="🖼️ Document", type="filepath", show_download_button=False)
 
-        with gr.Column(scale=2):
-            chat = gr.Chatbot(label="💬 Chat", height=460, type="messages")
+        with gr.Column(scale=1.4):
+            chat = gr.Chatbot(label="💬 Chat", height=460)
+
             with gr.Row():
                 user_prompt = gr.Textbox(placeholder="Ask your question...", show_label=False, scale=6)
+                mic_toggle_btn = gr.Button("🎙️", scale=1)
                 send_btn = gr.Button("Send", variant="primary", scale=2)
 
             with gr.Accordion("🎤 Voice Transcription", open=False) as voice_section:
-                gr.Markdown("**🎙️ Tap below to record your voice**")
-                voice_input = gr.Audio(label="", streaming=True, elem_id="record-audio")
-                voice_transcript = gr.Textbox(label="Transcript", lines=2, interactive=False)
-
                 with gr.Row():
-                    ask_btn = gr.Button("🟢 Ask", elem_id="ask-btn")
-                    clear_chat_btn = gr.Button("🧹 Clear Chat", elem_id="clear-chat-btn")
+                    voice_input = gr.Audio(label="Mic", streaming=True)
+                    voice_transcript = gr.Textbox(label="Transcript", lines=2, interactive=False)
+                clear_btn = gr.Button("🧹 Clear Transcript")
 
     # Functional bindings
+    def toggle_voice(curr):
+        return not curr, gr.update(visible=not curr)
+
+    mic_toggle_btn.click(fn=toggle_voice, inputs=voice_enabled, outputs=[voice_enabled, voice_section])
     send_btn.click(fn=handle_chat,
                    inputs=[user_prompt, chat_state, thread_state, image_state],
                    outputs=[user_prompt, chat, thread_state, image_state])
-    image_state.change(fn=update_image_display, inputs=image_state, outputs=image_display)
+    image_state.change(fn=lambda x: x, inputs=image_state, outputs=image_display)
     voice_input.stream(fn=send_audio, inputs=[voice_input, client_id], outputs=voice_transcript, stream_every=0.5)
-
-    ask_btn.click(fn=send_transcript_to_assistant,
-                  inputs=[voice_transcript, chat_state, thread_state, image_state, client_id],
-                  outputs=[user_prompt, chat, thread_state, image_state])
-
-    clear_chat_btn.click(fn=clear_chat_and_transcript,
-                         inputs=[client_id],
-                         outputs=[chat, voice_transcript, thread_state, image_state])
-
+    clear_btn.click(fn=clear_transcript, inputs=[client_id], outputs=voice_transcript)
     app.load(fn=create_ws, outputs=[client_id])
 
-app.launch()
+app.launch()
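
One behavioral detail of the bindings above: the removed `update_image_display` helper checked that the state value was an http(s) URL string before handing it to the image component, while the new `fn=lambda x: x` forwards whatever the state holds. If that guard is ever wanted back, it fits in the lambda itself; a hypothetical variant, not part of this commit:

    image_state.change(
        fn=lambda url: url if isinstance(url, str) and url.startswith("http") else None,
        inputs=image_state,
        outputs=image_display,
    )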
 