IAMTFRMZA committed
Commit e1b59aa · verified · 1 Parent(s): 74c6fff

Create realtime_transcriber.py

Files changed (1)
  1. realtime_transcriber.py +103 -0
realtime_transcriber.py ADDED
@@ -0,0 +1,103 @@
import asyncio
from websockets import connect, Data, ClientConnection
import json
import numpy as np
import base64
import soundfile as sf
import io
from pydub import AudioSegment

# Load OpenAI API key from environment (dotenv is optional)
import os
from dotenv import load_dotenv
load_dotenv()

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY environment variable must be set")

WEBSOCKET_URI = "wss://api.openai.com/v1/realtime?intent=transcription"
WEBSOCKET_HEADERS = {
    "Authorization": "Bearer " + OPENAI_API_KEY,
    "OpenAI-Beta": "realtime=v1"
}

# Active clients, keyed by client id
connections = {}


class WebSocketClient:
    def __init__(self, uri: str, headers: dict, client_id: str):
        self.uri = uri
        self.headers = headers
        self.websocket: ClientConnection = None
        self.queue = asyncio.Queue(maxsize=10)
        self.loop = None
        self.client_id = client_id
        self.transcript = ""

    async def connect(self):
        try:
            self.websocket = await connect(self.uri, additional_headers=self.headers)
            print("✅ Connected to OpenAI WebSocket")

            # Send the transcription session settings before streaming any audio
            with open("openai_transcription_settings.json", "r") as f:
                settings = f.read()
            await self.websocket.send(settings)

            # Receive transcripts and send audio concurrently
            await asyncio.gather(self.receive_messages(), self.send_audio_chunks())
        except Exception as e:
            print(f"❌ WebSocket error: {e}")

    def run(self):
        # Entry point for a background thread: give the client its own event loop
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self.connect())

    def process_websocket_message(self, message: Data):
        message_object = json.loads(message)
        if message_object["type"] != "error":
            if message_object["type"] == "conversation.item.input_audio_transcription.delta":
                delta = message_object["delta"]
                self.transcript += delta
            elif message_object["type"] == "conversation.item.input_audio_transcription.completed":
                # Separate completed utterances with a single trailing space
                self.transcript += ' ' if len(self.transcript) and self.transcript[-1] != ' ' else ''
        else:
            print(f"⚠️ Error received: {message}")

    async def send_audio_chunks(self):
        while True:
            sample_rate, audio_array = await self.queue.get()
            if self.websocket:
                # Downmix to mono and normalise to [-1.0, 1.0]
                if audio_array.ndim > 1:
                    audio_array = audio_array.mean(axis=1)
                audio_array = audio_array.astype(np.float32)
                audio_array /= np.max(np.abs(audio_array)) if np.max(np.abs(audio_array)) > 0 else 1.0
                audio_array_int16 = (audio_array * 32767).astype(np.int16)

                # Write a PCM16 WAV into memory, then resample to 24 kHz with pydub
                buffer = io.BytesIO()
                sf.write(buffer, audio_array_int16, sample_rate, format='WAV', subtype='PCM_16')
                buffer.seek(0)
                segment = AudioSegment.from_file(buffer, format="wav")
                resampled = segment.set_frame_rate(24000)

                out_buf = io.BytesIO()
                resampled.export(out_buf, format="wav")
                out_buf.seek(0)

                # Base64-encode the chunk and append it to the input audio buffer
                b64_audio = base64.b64encode(out_buf.read()).decode("utf-8")
                await self.websocket.send(json.dumps({
                    "type": "input_audio_buffer.append",
                    "audio": b64_audio
                }))

    async def receive_messages(self):
        async for message in self.websocket:
            self.process_websocket_message(message)

    def enqueue_audio_chunk(self, sample_rate: int, chunk_array: np.ndarray):
        # Called from other threads; drop the chunk if the queue is already full
        if not self.queue.full():
            asyncio.run_coroutine_threadsafe(self.queue.put((sample_rate, chunk_array)), self.loop)

    async def close(self):
        if self.websocket:
            await self.websocket.close()
        connections.pop(self.client_id)
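
The settings file read in connect() (openai_transcription_settings.json) is not included in this commit. The sketch below is one plausible way to generate it, assuming the transcription-session configuration events of the Realtime API beta; the event type, model name, and field values are assumptions, not taken from this repository.

import json

# Hypothetical contents of openai_transcription_settings.json -- not part of this
# commit. Event type and field names are assumptions and may need adjusting.
settings = {
    "type": "transcription_session.update",
    "session": {
        "input_audio_format": "pcm16",
        "input_audio_transcription": {
            "model": "gpt-4o-transcribe",   # assumed model name
            "language": "en"
        },
        "turn_detection": {"type": "server_vad"}   # assumed VAD setting
    }
}

with open("openai_transcription_settings.json", "w") as f:
    json.dump(settings, f, indent=2)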
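
A minimal, hypothetical usage sketch (not part of this commit): it starts run() on a background thread because run() builds its own event loop, waits briefly for the connection, and feeds placeholder audio through enqueue_audio_chunk(). The client id, chunk timing, and silent audio are made up for illustration.

import threading
import time
import numpy as np
from realtime_transcriber import WebSocketClient, WEBSOCKET_URI, WEBSOCKET_HEADERS, connections

client_id = "demo-client"   # hypothetical identifier
client = WebSocketClient(WEBSOCKET_URI, WEBSOCKET_HEADERS, client_id)
connections[client_id] = client

# run() creates its own event loop, so it must live on a separate thread
threading.Thread(target=client.run, daemon=True).start()
time.sleep(1)   # crude wait for the socket; a real app would synchronise explicitly

# Stand-in for microphone capture: 0.5-second chunks of silence at 16 kHz
sample_rate = 16000
for _ in range(10):
    chunk = np.zeros(sample_rate // 2, dtype=np.float32)
    client.enqueue_audio_chunk(sample_rate, chunk)
    time.sleep(0.5)

print(client.transcript)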