IAMTFRMZA committed on
Commit
33aa59b
·
verified ·
1 Parent(s): 7a11f04

Create realtime_transcriber.py

Browse files
Files changed (1) hide show
  1. realtime_transcriber.py +103 -0
realtime_transcriber.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
from websockets import connect, Data, ClientConnection
import json
import numpy as np
import base64
import soundfile as sf
import io
from pydub import AudioSegment
import os

# Load OpenAI API key from a local .env file (if present) into the process
# environment before it is read below.
from dotenv import load_dotenv
load_dotenv()

# Fail fast at import time: every WebSocket connection needs the API key.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY must be set in environment")

# OpenAI realtime endpoint; the intent=transcription query parameter selects
# a transcription session (see the session settings JSON sent on connect).
WEBSOCKET_URI = "wss://api.openai.com/v1/realtime?intent=transcription"
WEBSOCKET_HEADERS = {
    "Authorization": f"Bearer {OPENAI_API_KEY}",
    # NOTE(review): opts into the realtime beta protocol — confirm against
    # current OpenAI realtime API docs if the connection is rejected.
    "OpenAI-Beta": "realtime=v1"
}

# Shared client registry — presumably maps client_id -> WebSocketClient;
# populated by code outside this view. TODO confirm against callers.
connections = {}
28
class WebSocketClient:
    """Streams queued audio chunks to OpenAI's realtime transcription
    WebSocket and accumulates the transcript text returned by the server.

    Intended usage (inferred from the API; confirm against callers):
    ``run()`` blocks and is meant for a background thread, while
    ``enqueue_audio_chunk()`` is called from another thread (e.g. an audio
    callback) to feed raw samples in.
    """

    def __init__(self, uri: str, headers: dict, client_id: str):
        self.uri = uri
        self.headers = headers
        # None until connect() succeeds; string annotation keeps the
        # third-party type from being evaluated at class-definition time.
        self.websocket: "ClientConnection | None" = None
        # Bounded queue: when the socket stalls, new audio is dropped
        # instead of growing memory without limit (see enqueue_audio_chunk).
        self.queue = asyncio.Queue(maxsize=10)
        # Event loop owned by run(); enqueue_audio_chunk is a no-op until set.
        self.loop = None
        self.client_id = client_id
        self.transcript = ""

    async def connect(self):
        """Open the WebSocket, send the session settings, then run the
        receive and send coroutines until either one fails.

        All errors are reported to stdout rather than raised, so a failed
        connection ends run() quietly (original best-effort behavior).
        """
        try:
            self.websocket = await connect(self.uri, additional_headers=self.headers)
            print(f"✅ Connected to OpenAI WebSocket")

            # Send transcription session settings (raw JSON passed through
            # verbatim from the local settings file).
            with open("openai_transcription_settings.json", "r") as f:
                settings = f.read()
            await self.websocket.send(settings)

            await asyncio.gather(self.receive_messages(), self.send_audio_chunks())
        except Exception as e:
            print(f"❌ WebSocket Error: {e}")

    def run(self):
        """Blocking entry point for a background thread: create a private
        event loop and drive connect() to completion."""
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        try:
            self.loop.run_until_complete(self.connect())
        finally:
            # Fix: release loop resources once the connection ends
            # (the original leaked the loop).
            self.loop.close()

    def process_websocket_message(self, message: "Data"):
        """Fold one server event into self.transcript.

        Delta events append their text; completed events terminate the
        utterance with a single trailing space. Malformed or unknown
        messages are logged and ignored.
        """
        try:
            event = json.loads(message)
            if event["type"] == "conversation.item.input_audio_transcription.delta":
                self.transcript += event["delta"]
            elif event["type"] == "conversation.item.input_audio_transcription.completed":
                # Separate completed utterances with exactly one space.
                if self.transcript and self.transcript[-1] != ' ':
                    self.transcript += ' '
        except Exception as e:
            print(f"⚠️ Error processing message: {e}")

    async def send_audio_chunks(self):
        """Consume queued (sample_rate, samples) pairs forever: downmix to
        mono, peak-normalize, resample to 24 kHz WAV, and send base64-encoded
        to the server. Chunks arriving before the socket exists are dropped."""
        while True:
            sample_rate, audio_array = await self.queue.get()

            if self.websocket:
                # Downmix multi-channel audio to mono by averaging channels.
                if audio_array.ndim > 1:
                    audio_array = audio_array.mean(axis=1)

                audio_array = audio_array.astype(np.float32)
                # Fix: compute the peak once (was evaluated twice per chunk)
                # and guard the empty-chunk case, where np.max would raise.
                peak = np.max(np.abs(audio_array)) if audio_array.size else 0.0
                if peak > 0:
                    audio_array /= peak
                int_audio = (audio_array * 32767).astype(np.int16)

                # Wrap the PCM samples in a WAV container in memory.
                buffer = io.BytesIO()
                sf.write(buffer, int_audio, sample_rate, format="WAV", subtype="PCM_16")
                buffer.seek(0)

                # Resample to the 24 kHz rate expected by the realtime API
                # (TODO confirm rate against current OpenAI docs).
                audio_segment = AudioSegment.from_file(buffer, format="wav")
                resampled = audio_segment.set_frame_rate(24000)

                out_buf = io.BytesIO()
                resampled.export(out_buf, format="wav")
                out_buf.seek(0)

                b64_audio = base64.b64encode(out_buf.read()).decode("utf-8")
                await self.websocket.send(json.dumps({
                    "type": "input_audio_buffer.append",
                    "audio": b64_audio
                }))

    async def receive_messages(self):
        """Iterate incoming server messages until the socket closes."""
        async for message in self.websocket:
            self.process_websocket_message(message)

    def enqueue_audio_chunk(self, sample_rate: int, chunk_array: np.ndarray):
        """Thread-safe producer: schedule a chunk onto the asyncio queue.

        Silently drops the chunk when the queue is full (backpressure) or —
        fix — when the loop does not exist yet / has already closed; the
        original raised from the calling thread in those cases.
        """
        if self.loop is None or self.loop.is_closed():
            return
        if not self.queue.full():
            asyncio.run_coroutine_threadsafe(self.queue.put((sample_rate, chunk_array)), self.loop)