IAMTFRMZA committed
Commit b106fa1 (verified) · Parent: 51a57c1

Update app.py

Files changed (1): app.py (+94, -136)
app.py CHANGED
Old version (removed lines marked "-"):

@@ -1,47 +1,18 @@
  import gradio as gr
  import asyncio
- from websockets import connect, Data, ClientConnection
- from dotenv import load_dotenv
- import json
- import os
  import threading
- import numpy as np
  import base64
- import soundfile as sf
  import io
  from pydub import AudioSegment
- import time
- import uuid
-
- # =========================
- # Setup & Configuration
- # =========================
-
- class LogColors:
-     OK = '\033[94m'
-     SUCCESS = '\033[92m'
-     WARNING = '\033[93m'
-     ERROR = '\033[91m'
-     ENDC = '\033[0m'
-
- load_dotenv()
- OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
- if not OPENAI_API_KEY:
-     raise ValueError("OPENAI_API_KEY environment variable must be set")
-
- WEBSOCKET_URI = "wss://api.openai.com/v1/realtime?intent=transcription"
- WEBSOCKET_HEADERS = {
-     "Authorization": "Bearer " + OPENAI_API_KEY,
-     "OpenAI-Beta": "realtime=v1"
- }
-
- css = ""
- connections = {}
-
- # =========================
- # WebSocket Client Class
- # =========================

  class WebSocketClient:
      def __init__(self, uri: str, headers: dict, client_id: str):
          self.uri = uri
@@ -53,17 +24,10 @@ class WebSocketClient:
          self.transcript = ""

      async def connect(self):
-         try:
-             self.websocket = await connect(self.uri, additional_headers=self.headers)
-             print(f"{LogColors.SUCCESS}Connected to OpenAI WebSocket{LogColors.ENDC}\n")
-
-             with open("openai_transcription_settings.json", "r") as f:
-                 settings = f.read()
-                 await self.websocket.send(settings)
-
-             await asyncio.gather(self.receive_messages(), self.send_audio_chunks())
-         except Exception as e:
-             print(f"{LogColors.ERROR}WebSocket Connection Error: {e}{LogColors.ENDC}")

      def run(self):
          self.loop = asyncio.new_event_loop()
@@ -71,109 +35,103 @@ class WebSocketClient:
          self.loop.run_until_complete(self.connect())

      def process_websocket_message(self, message: Data):
-         message_object = json.loads(message)
-         if message_object["type"] != "error":
-             print(f"{LogColors.OK}Received message: {LogColors.ENDC} {message}")
-             if message_object["type"] == "conversation.item.input_audio_transcription.delta":
-                 delta = message_object["delta"]
-                 self.transcript += delta
-             elif message_object["type"] == "conversation.item.input_audio_transcription.completed":
-                 self.transcript += ' ' if self.transcript and self.transcript[-1] != ' ' else ''
-         else:
-             print(f"{LogColors.ERROR}Error: {message}{LogColors.ENDC}")

      async def send_audio_chunks(self):
          while True:
-             audio_data = await self.queue.get()
-             sample_rate, audio_array = audio_data
-             if self.websocket:
-                 if audio_array.ndim > 1:
-                     audio_array = audio_array.mean(axis=1)
-                 audio_array = audio_array.astype(np.float32)
-                 audio_array /= np.max(np.abs(audio_array)) if np.max(np.abs(audio_array)) > 0 else 1.0
-                 audio_array_int16 = (audio_array * 32767).astype(np.int16)
-
-                 audio_buffer = io.BytesIO()
-                 sf.write(audio_buffer, audio_array_int16, sample_rate, format='WAV', subtype='PCM_16')
-                 audio_buffer.seek(0)
-                 audio_segment = AudioSegment.from_file(audio_buffer, format="wav")
-                 resampled_audio = audio_segment.set_frame_rate(24000)
-
-                 output_buffer = io.BytesIO()
-                 resampled_audio.export(output_buffer, format="wav")
-                 output_buffer.seek(0)
-                 base64_audio = base64.b64encode(output_buffer.read()).decode("utf-8")
-
-                 await self.websocket.send(json.dumps({"type": "input_audio_buffer.append", "audio": base64_audio}))
-                 print(f"{LogColors.OK}Sent audio chunk{LogColors.ENDC}")

      async def receive_messages(self):
          async for message in self.websocket:
              self.process_websocket_message(message)

-     def enqueue_audio_chunk(self, sample_rate: int, chunk_array: np.ndarray):
          if not self.queue.full():
-             asyncio.run_coroutine_threadsafe(self.queue.put((sample_rate, chunk_array)), self.loop)
-         else:
-             print(f"{LogColors.WARNING}Queue is full, dropping audio chunk{LogColors.ENDC}")

      async def close(self):
          if self.websocket:
              await self.websocket.close()
-         connections.pop(self.client_id)
-         print(f"{LogColors.WARNING}WebSocket connection closed{LogColors.ENDC}")
-
- # =========================
- # Helper Functions
- # =========================

- def send_audio_chunk(new_chunk: gr.Audio, client_id: str):
-     if client_id not in connections:
-         return "Connection is being established, please try again in a few seconds."
      sr, y = new_chunk
-     connections[client_id].enqueue_audio_chunk(sr, y)
-     return connections[client_id].transcript
-
- def create_new_websocket_connection():
-     client_id = str(uuid.uuid4())
-     connections[client_id] = WebSocketClient(WEBSOCKET_URI, WEBSOCKET_HEADERS, client_id)
-     threading.Thread(target=connections[client_id].run, daemon=True).start()
-     return client_id
-
- def clear_transcript(client_id):
-     if client_id in connections:
-         connections[client_id].transcript = ""
      return ""

- # =========================
- # Gradio UI Sections
- # =========================
-
- with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
-
-     with gr.Tab("💬 Chat Assistant"):
-         gr.Markdown("### Chat Section (Coming Soon)")
-         gr.Textbox(label="Your question")
-         gr.Button("Send")
-
-     with gr.Tab("📄 Document Viewer"):
-         gr.Markdown("### Upload and View Documents")
-         gr.File(label="Upload Document", file_types=[".pdf", ".txt", ".docx"])
-         gr.Textbox(label="Document Preview", lines=10)
-
-     with gr.Tab("🎤 Voice Transcription"):
-         gr.Markdown("### Realtime Voice-to-Text Transcription")
-         with gr.Row():
-             output_textbox = gr.Textbox(label="Transcript", lines=7, interactive=False, autoscroll=True)
-         with gr.Row():
-             with gr.Column(scale=5):
-                 audio_input = gr.Audio(streaming=True, format="wav")
-             with gr.Column():
-                 clear_button = gr.Button("Clear Transcript")
-         client_id = gr.State()
-         clear_button.click(clear_transcript, inputs=[client_id], outputs=[output_textbox])
-         audio_input.stream(send_audio_chunk, [audio_input, client_id], [output_textbox], stream_every=0.5)
-         demo.load(create_new_websocket_connection, outputs=[client_id])
-
- demo.launch()
New version (app.py after this commit, added lines marked "+"):

  import gradio as gr
+ import os
+ import uuid
  import asyncio
  import threading
+ import json
  import base64
+ import numpy as np
  import io
+ import soundfile as sf
  from pydub import AudioSegment
+ from websockets import connect, Data, ClientConnection
+ from dotenv import load_dotenv

+ # ========== WebSocket Client Setup ==========
  class WebSocketClient:
      def __init__(self, uri: str, headers: dict, client_id: str):
          self.uri = uri
          ...
          self.transcript = ""

      async def connect(self):
+         self.websocket = await connect(self.uri, additional_headers=self.headers)
+         with open("openai_transcription_settings.json", "r") as f:
+             await self.websocket.send(f.read())
+         await asyncio.gather(self.receive_messages(), self.send_audio_chunks())
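connect() performs the handshake, then forwards the raw contents of openai_transcription_settings.json as the session's first message before starting the receive and send loops concurrently. That settings file is not shown in this diff; judging from the event types handled below, it presumably holds a Realtime transcription-session configuration. A hypothetical example of its contents (every field value here is an assumption, not taken from the repo):

    {
        "type": "transcription_session.update",
        "session": {
            "input_audio_format": "pcm16",
            "input_audio_transcription": {"model": "gpt-4o-transcribe"},
            "turn_detection": {"type": "server_vad"}
        }
    }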
 
      def run(self):
          self.loop = asyncio.new_event_loop()
          ...
          self.loop.run_until_complete(self.connect())

      def process_websocket_message(self, message: Data):
+         msg = json.loads(message)
+         if msg["type"] == "conversation.item.input_audio_transcription.delta":
+             self.transcript += msg["delta"]
+         elif msg["type"] == "conversation.item.input_audio_transcription.completed":
+             self.transcript += ' '
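process_websocket_message() builds the transcript incrementally: delta events append partial text, and a completed event appends a trailing space to separate segments. For illustration, synthetic events pushed through the handler (payloads abbreviated; real Realtime events carry more fields, and URI and HEADERS are the constants defined further down in the file):

    client = WebSocketClient(URI, HEADERS, "demo")
    client.process_websocket_message(json.dumps({
        "type": "conversation.item.input_audio_transcription.delta",
        "delta": "hello"
    }))
    client.process_websocket_message(json.dumps({
        "type": "conversation.item.input_audio_transcription.completed"
    }))
    print(client.transcript)  # -> "hello "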
 
      async def send_audio_chunks(self):
          while True:
+             sr, audio_array = await self.queue.get()
+             if audio_array.ndim > 1:
+                 audio_array = audio_array.mean(axis=1)
+             audio_array = (audio_array / np.max(np.abs(audio_array))) if np.max(np.abs(audio_array)) > 0 else audio_array
+             int16 = (audio_array * 32767).astype(np.int16)
+             buffer = io.BytesIO()
+             sf.write(buffer, int16, sr, format='WAV', subtype='PCM_16')
+             buffer.seek(0)
+             audio = AudioSegment.from_file(buffer, format="wav").set_frame_rate(24000)
+             out = io.BytesIO()
+             audio.export(out, format="wav")
+             out.seek(0)
+             encoded = base64.b64encode(out.read()).decode("utf-8")
+             await self.websocket.send(json.dumps({"type": "input_audio_buffer.append", "audio": encoded}))
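Each queued chunk arrives as a (sample_rate, float_array) pair straight from Gradio. The loop above downmixes to mono, peak-normalizes, quantizes to 16-bit PCM, resamples to the 24 kHz the session is set up for, and ships the result base64-encoded inside an input_audio_buffer.append event. The transform is easy to sanity-check in isolation; a minimal sketch with a synthetic sine tone standing in for microphone input (encode_chunk is an illustrative name, not part of the app):

    import io, base64
    import numpy as np
    import soundfile as sf
    from pydub import AudioSegment

    def encode_chunk(sr: int, samples: np.ndarray) -> str:
        # Mirrors send_audio_chunks: mono float -> PCM16 WAV -> 24 kHz -> base64.
        if samples.ndim > 1:
            samples = samples.mean(axis=1)          # downmix to mono
        peak = np.max(np.abs(samples))
        if peak > 0:
            samples = samples / peak                # normalize to [-1, 1]
        pcm16 = (samples * 32767).astype(np.int16)
        buf = io.BytesIO()
        sf.write(buf, pcm16, sr, format="WAV", subtype="PCM_16")
        buf.seek(0)
        resampled = AudioSegment.from_file(buf, format="wav").set_frame_rate(24000)
        out = io.BytesIO()
        resampled.export(out, format="wav")
        return base64.b64encode(out.getvalue()).decode("utf-8")

    t = np.linspace(0, 0.5, 24000, endpoint=False)  # 0.5 s at 48 kHz, roughly one streamed chunk
    encoded = encode_chunk(48000, np.sin(2 * np.pi * 440 * t))
    print(encoded[:48], "...")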
 
      async def receive_messages(self):
          async for message in self.websocket:
              self.process_websocket_message(message)

+     def enqueue_audio_chunk(self, sr, chunk):
          if not self.queue.full():
+             asyncio.run_coroutine_threadsafe(self.queue.put((sr, chunk)), self.loop)

      async def close(self):
          if self.websocket:
              await self.websocket.close()
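The threading model is the subtle part of this class: run() gives every client a private event loop on a daemon thread, while enqueue_audio_chunk() is called from Gradio's request thread, so the handoff has to go through asyncio.run_coroutine_threadsafe rather than a bare await. The same pattern in isolation (LoopWorker is a hypothetical name; assumes Python 3.10+, where asyncio.Queue no longer binds to a loop at construction):

    import asyncio
    import threading
    import time

    class LoopWorker:
        def __init__(self):
            self.loop = asyncio.new_event_loop()
            self.queue = asyncio.Queue(maxsize=10)

        def run(self):
            # Runs on the daemon thread; owns this worker's loop.
            asyncio.set_event_loop(self.loop)
            self.loop.run_until_complete(self.consume())

        async def consume(self):
            while True:
                print("consumed:", await self.queue.get())

        def enqueue(self, item):
            # Called from a foreign thread: schedule the put on the worker's loop.
            if not self.queue.full():
                asyncio.run_coroutine_threadsafe(self.queue.put(item), self.loop)

    worker = LoopWorker()
    threading.Thread(target=worker.run, daemon=True).start()
    worker.enqueue("chunk-1")
    time.sleep(0.1)  # give the daemon loop a moment before the script exits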
 
+ # ========== Transcription Helpers ==========
+ connections = {}
+ load_dotenv()
+ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
+ HEADERS = {"Authorization": f"Bearer {OPENAI_API_KEY}", "OpenAI-Beta": "realtime=v1"}
+ URI = "wss://api.openai.com/v1/realtime?intent=transcription"
+
+ def create_websocket():
+     cid = str(uuid.uuid4())
+     connections[cid] = WebSocketClient(URI, HEADERS, cid)
+     threading.Thread(target=connections[cid].run, daemon=True).start()
+     return cid
+
+ def send_audio(new_chunk, cid):
+     if cid not in connections:
+         return "Starting connection..."
      sr, y = new_chunk
+     connections[cid].enqueue_audio_chunk(sr, y)
+     return connections[cid].transcript
+
+ def clear_transcript(cid):
+     if cid in connections:
+         connections[cid].transcript = ""
      return ""
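Each live WebSocketClient is keyed by a UUID in connections, so several browser sessions can stream at once and each callback only ever touches its own client. Note that, unlike the old version, this revision no longer raises at startup when OPENAI_API_KEY is unset; a missing key now surfaces later as a failed handshake inside the daemon thread. Illustrative lifecycle of the helpers (a usage sketch, not code from app.py; np is the numpy import above):

    cid = create_websocket()       # app.load(): one client per browser session
    chunk = (48000, np.zeros(24000, dtype=np.float32))  # (sample_rate, samples) as streamed by gr.Audio
    text = send_audio(chunk, cid)  # enqueue the chunk, return the transcript so far
    clear_transcript(cid)          # reset the accumulated text for this session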
 
+ # ========== Gradio UI Layout ==========
+ with gr.Blocks(theme=gr.themes.Soft()) as app:
+
+     gr.Markdown("# 🧠 Document AI Assistant with Voice & Viewer")
+
+     # State
+     client_id = gr.State()
+
+     # Layout
+     with gr.Row():
+         # 🟢 Chat Section (Main)
+         with gr.Column(scale=2):
+             chatbot = gr.Chatbot(label="Chat Assistant")
+             msg = gr.Textbox(label="Ask something", placeholder="e.g., Summarize this document...")
+             send_btn = gr.Button("Send")
+
+             def chat_response(user_msg, history):
+                 history = history or []
+                 reply = f"🤖 This is a placeholder reply to: {user_msg}"
+                 history.append((user_msg, reply))
+                 return "", history
+
+             send_btn.click(chat_response, inputs=[msg, chatbot], outputs=[msg, chatbot])
+
+         # Smaller widgets section
+         with gr.Column(scale=1):
+             # 🟡 Image Viewer
+             viewer = gr.Image(label="📄 Document Viewer", type="filepath")
+
+             # 🔵 Voice Transcription
+             transcript = gr.Textbox(label="🎤 Transcript", lines=5, interactive=False)
+             audio = gr.Audio(label="🎙️ Audio", streaming=True)
+             clear = gr.Button("Clear Transcript")
+
+             audio.stream(fn=send_audio, inputs=[audio, client_id], outputs=transcript, stream_every=0.5)
+             clear.click(fn=clear_transcript, inputs=[client_id], outputs=transcript)
+             app.load(create_websocket, outputs=client_id)
+
+ app.launch()
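The event wiring at the bottom is what makes streaming work: audio.stream() re-invokes send_audio every 0.5 s with the newest (sample_rate, ndarray) chunk, and app.load() mints a per-session client id before any audio arrives. The skeleton can be exercised without OpenAI credentials by replacing the WebSocket client with an in-memory stub; a minimal self-contained sketch (all names illustrative):

    import uuid
    import gradio as gr

    sessions = {}  # per-client accumulated text, standing in for `connections`

    def new_session():
        cid = str(uuid.uuid4())
        sessions[cid] = ""
        return cid

    def on_chunk(chunk, cid):
        # Stub transcription: record chunk stats instead of calling OpenAI.
        if chunk is None or cid not in sessions:
            return sessions.get(cid, "")
        sr, y = chunk
        sessions[cid] += f"[{len(y)} samples @ {sr} Hz] "
        return sessions[cid]

    with gr.Blocks() as demo:
        cid = gr.State()
        out = gr.Textbox(label="Chunks seen", interactive=False)
        mic = gr.Audio(streaming=True)
        mic.stream(on_chunk, inputs=[mic, cid], outputs=out, stream_every=0.5)
        demo.load(new_session, outputs=cid)

    demo.launch()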