Sofia Casadei committed on
Commit e0c1694 · 1 Parent(s): 2e32fcb

fix: connection, add: language via env var

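This commit splits the inline front-end out of the HTML into static/ files served by FastAPI and makes the Whisper transcription language configurable through a LANGUAGE environment variable (see the main.py diff below). A minimal usage sketch, assuming only the variable names read in main.py; the concrete values and the way the app is launched are illustrative assumptions, not part of the commit:

# Illustrative sketch only: configuring the new environment variables before
# the app module is imported. main.py calls load_dotenv(), so the same keys
# can also be placed in a .env file instead.
import os

os.environ["LANGUAGE"] = "italian"   # forwarded to the Whisper pipeline's generate_kwargs
os.environ["UI_TYPE"] = "screen"     # serve static/index-screen.html instead of static/index.html
os.environ["APP_MODE"] = "local"     # skip the Hugging Face TURN credential lookup

import main  # assumption: importing main builds the FastAPI `app` with these settings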
index-screen.html DELETED
@@ -1,632 +0,0 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
-
4
- <head>
5
- <meta charset="UTF-8">
6
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
- <title>Real-time Whisper Transcription</title>
8
- <style>
9
- :root {
10
- --background-dark: #000000;
11
- --text-light: #ffffff;
12
- }
13
-
14
- body {
15
- font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
16
- margin: 0; /* Removes default margin */
17
- padding: 0; /* Removes default padding */
18
- background-color: var(--background-dark); /* Sets background to black */
19
- color: var(--text-light); /* Sets text to white */
20
- min-height: 100vh; /* Ensures page fills entire viewport height */
21
- }
22
-
23
- /* Hide the header in presentation mode */
24
- .hero {
25
- display: none; /* Hides the hero section completely */
26
- }
27
-
28
- .container {
29
- max-width: 100%; /* Makes container full width */
30
- margin: 0; /* Removes margin */
31
- padding: 1rem; /* Adds small padding all around */
32
- }
33
-
34
- /* Base styling for transcript container */
35
- .transcript-container {
36
- height: 90vh; /* Sets height to 90% of viewport height */
37
- border: none; /* Removes border */
38
- padding: 2rem; /* Adds generous padding inside */
39
- background: var(--background-dark); /* Ensures background is black */
40
- color: var(--text-light); /* Ensures text is white */
41
- overflow-y: auto; /* Enables vertical scrolling when content overflows */
42
- margin-bottom: 0; /* Removes bottom margin */
43
- display: block; /* Makes element a block to take full width */
44
- width: 100%; /* Sets width to 100% */
45
- }
46
-
47
- /* Styling for transcript paragraphs */
48
- .transcript-container p {
49
- margin: 0.5rem 0; /* Small vertical margin between paragraphs */
50
- padding: 0.5rem 0; /* Small vertical padding within paragraphs */
51
- background: transparent; /* Transparent background (no highlighting) */
52
- border-radius: 0; /* No rounded corners */
53
- line-height: 1.6; /* Increases line spacing for readability */
54
- font-size: 3.5rem; /* rem means relative to the root font size */
55
- font-weight: 500; /* 500 = medium weight, 700 = bold */
56
- max-width: 98%; /* Full width within container */
57
- white-space: normal; /* Allows text to wrap normally */
58
- word-wrap: break-word; /* Prevents overflow of long words */
59
- color: white; /* Explicitly sets text color to white */
60
- display: block; /* Each paragraph takes full width */
61
- }
62
-
63
- /* Current paragraph styling - slightly brighter for emphasis */
64
- .transcript-container p.current {
65
- background: transparent; /* No background color */
66
- color: rgba(255, 255, 255, 1.0); /* Full brightness white for current text */
67
- }
68
-
69
- /* Ensure all paragraphs have full opacity (keeps history visible) */
70
- .transcript-container p:nth-last-child(n+4) {
71
- opacity: 1.0; /* Shows all paragraphs at full opacity */
72
- }
73
-
74
- /* Controls for starting/stopping transcription */
75
- .controls {
76
- position: fixed; /* Fixes controls to viewport */
77
- bottom: 2rem; /* Positions 2rem from bottom */
78
- right: 2rem; /* Positions 2rem from right */
79
- margin: 0; /* No margin */
80
- opacity: 0.8; /* Slightly transparent when not hovered */
81
- transition: opacity 0.3s ease; /* Smooth transition for opacity changes */
82
- z-index: 1000; /* Ensures controls appear above other elements */
83
- }
84
-
85
- .controls:hover {
86
- opacity: 1; /* Full opacity on hover */
87
- }
88
-
89
- /* Button styling - orange with black text for good contrast */
90
- button {
91
- background: rgba(249, 164, 92, 1.0); /* Solid orange background */
92
- backdrop-filter: blur(5px); /* Blur effect for elements behind */
93
- font-size: 1.2rem; /* Large text */
94
- min-width: 160px; /* Minimum width for button */
95
- padding: 15px 30px; /* Generous padding inside button */
96
- color: black !important; /* Forces black text color */
97
- font-weight: bold; /* Bold text for better visibility */
98
- border: 2px solid rgba(255, 255, 255, 0.2); /* Subtle border */
99
- border-radius: 8px; /* Rounded corners */
100
- cursor: pointer; /* Shows pointer cursor on hover */
101
- transition: all 0.2s ease; /* Smooth transition for hover effects */
102
- display: block; /* Makes button take up full width */
103
- }
104
-
105
- button:hover {
106
- background: rgba(249, 164, 92, 0.9); /* Slightly more transparent on hover */
107
- transform: translateY(-2px); /* Slight upward movement on hover */
108
- }
109
-
110
- /* Spinner animation for loading state */
111
- .icon-with-spinner .spinner {
112
- border: 3px solid black; /* Spinner border */
113
- border-top: 3px solid transparent; /* Transparent top creates spinning effect */
114
- border-radius: 50%; /* Makes it circular */
115
- width: 24px; /* Width of spinner */
116
- height: 24px; /* Height of spinner */
117
- animation: spin 1s linear infinite; /* Animation for spinning effect */
118
- }
119
-
120
- @keyframes spin {
121
- 0% { transform: rotate(0deg); } /* Starting rotation */
122
- 100% { transform: rotate(360deg); } /* Full 360Β° rotation */
123
- }
124
-
125
- /* Recording indicator pulse animation */
126
- .pulse-circle {
127
- display: inline-block; /* Allows other elements inline */
128
- width: 12px; /* Width of pulse circle */
129
- height: 12px; /* Height of pulse circle */
130
- border-radius: 50%; /* Makes it circular */
131
- background-color: red; /* Red color for recording indicator */
132
- margin-right: 8px; /* Space to right of circle */
133
- animation: pulse 1.5s ease infinite; /* Continuous pulsing animation */
134
- }
135
-
136
- @keyframes pulse {
137
- 0% { transform: scale(0.95); opacity: 0.7; } /* Slightly smaller and transparent */
138
- 50% { transform: scale(1.1); opacity: 1; } /* Larger and fully opaque */
139
- 100% { transform: scale(0.95); opacity: 0.7; } /* Back to starting state */
140
- }
141
-
142
- /* Custom scrollbar styling */
143
- .transcript-container::-webkit-scrollbar {
144
- width: 8px; /* Width of scrollbar */
145
- }
146
-
147
- .transcript-container::-webkit-scrollbar-track {
148
- background: var(--background-dark); /* Black scrollbar track */
149
- }
150
-
151
- .transcript-container::-webkit-scrollbar-thumb {
152
- background: rgba(249, 164, 92, 0.3); /* Semi-transparent orange scrollbar thumb */
153
- border-radius: 4px; /* Rounded corners on scrollbar thumb */
154
- }
155
-
156
- /* Error toast styling */
157
- .toast {
158
- background: rgba(0, 0, 0, 0.8); /* Semi-transparent black background */
159
- backdrop-filter: blur(5px); /* Blur effect behind toast */
160
- color: var(--text-light); /* White text */
161
- font-size: 1.2rem; /* Large text size */
162
- }
163
- </style>
164
- </head>
165
-
166
- <body>
167
- <!-- Error message container that slides in when needed -->
168
- <div id="error-toast" class="toast"></div>
169
- <!-- Header section (hidden in presentation mode) -->
170
- <div class="hero">
171
- <h1>Real-time Transcription</h1>
172
- <p>Powered by FastRTC and Local Whisper πŸ€—</p>
173
- </div>
174
-
175
- <!-- Main content container -->
176
- <div class="container">
177
- <!-- Container for transcript text -->
178
- <div class="transcript-container" id="transcript"></div>
179
- <!-- Controls for starting/stopping recording -->
180
- <div class="controls">
181
- <button id="start-button">Start Recording</button>
182
- </div>
183
- </div>
184
-
185
- <script>
186
- // Global variables for WebRTC connection
187
- let peerConnection; // Stores the WebRTC connection object for audio streaming
188
- let webrtc_id; // A unique ID to identify this connection on the server
189
- let audioContext, analyser, audioSource; // Audio processing objects for visualization
190
- let audioLevel = 0; // Stores the current audio level (volume) from 0-1
191
- let animationFrame; // Reference to the animation frame for audio visualization
192
- let isRecording = false; // Tracks whether we're currently recording or not
193
- let eventSource; // Object that receives transcription results from the server
194
-
195
- // DOM element references
196
- const startButton = document.getElementById('start-button'); // The button to start/stop recording
197
- const transcriptDiv = document.getElementById('transcript'); // The container for transcription text
198
-
199
- // Variables for managing the transcript display
200
- let currentParagraph = null; // Reference to the current paragraph being updated
201
- let lastUpdateTime = Date.now(); // Timestamp of when we last updated the transcript
202
-
203
- // Show error messages to the user in a toast notification
204
- function showError(message) {
205
- const toast = document.getElementById('error-toast'); // Get the toast element
206
- toast.textContent = message; // Set the error message
207
- toast.style.display = 'block'; // Make the toast visible
208
-
209
- // Hide toast after 5 seconds
210
- setTimeout(() => {
211
- toast.style.display = 'none'; // Hide the toast
212
- }, 5000);
213
- }
214
-
215
- // Handle messages received from the server through WebRTC data channel
216
- function handleMessage(event) {
217
- // Parse JSON message
218
- const eventJson = JSON.parse(event.data);
219
- // Display errors to the user
220
- if (eventJson.type === "error") {
221
- showError(eventJson.message);
222
- }
223
- // Log all messages to console for debugging
224
- console.log('Received message:', event.data);
225
- }
226
-
227
- // Update button appearance based on connection state
228
- function updateButtonState() {
229
- // If connecting, show spinner
230
- if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
231
- startButton.innerHTML = `
232
- <div class="icon-with-spinner">
233
- <div class="spinner"></div>
234
- <span>Connecting...</span>
235
- </div>
236
- `;
237
- isRecording = false; // Not recording while connecting
238
- // If connected, show pulsing recording indicator
239
- } else if (peerConnection && peerConnection.connectionState === 'connected') {
240
- startButton.innerHTML = `
241
- <div class="pulse-container">
242
- <div class="pulse-circle"></div>
243
- <span>Stop Recording</span>
244
- </div>
245
- `;
246
- isRecording = true; // Set recording state to true
247
- // Default state - ready to start
248
- } else {
249
- startButton.innerHTML = 'Start Recording';
250
- isRecording = false; // Not recording when not connected
251
- }
252
- }
253
-
254
- // Set up audio visualization to show when the user is speaking
255
- function setupAudioVisualization(stream) {
256
- // Create or resume the audio context
257
- if (!audioContext) {
258
- // Create new audio context with browser compatibility handling
259
- audioContext = new (window.AudioContext || window.webkitAudioContext)();
260
- } else {
261
- // Resume context if it was suspended
262
- if (audioContext.state === 'suspended') {
263
- audioContext.resume();
264
- }
265
- }
266
-
267
- // Create audio analyzer for processing audio data
268
- analyser = audioContext.createAnalyser();
269
- // Create media source from microphone stream
270
- audioSource = audioContext.createMediaStreamSource(stream);
271
- // Connect source to analyzer
272
- audioSource.connect(analyser);
273
- // Set FFT size (controls frequency data resolution)
274
- analyser.fftSize = 64;
275
- // Create array to store frequency data
276
- const dataArray = new Uint8Array(analyser.frequencyBinCount);
277
-
278
- // Function to continuously update audio level visualization
279
- function updateAudioLevel() {
280
- // Get audio frequency data
281
- analyser.getByteFrequencyData(dataArray);
282
- // Calculate average volume across all frequencies
283
- const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
284
- // Convert to 0-1 scale
285
- audioLevel = average / 255;
286
-
287
- // Update pulse circle size based on audio level
288
- const pulseCircle = document.querySelector('.pulse-circle');
289
- if (pulseCircle) {
290
- pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
291
- }
292
-
293
- // Continue animation loop
294
- animationFrame = requestAnimationFrame(updateAudioLevel);
295
- }
296
- // Start audio visualization loop
297
- updateAudioLevel();
298
- }
299
-
300
- // Set up WebRTC connection for streaming audio to server
301
- async function setupWebRTC() {
302
- // Get WebRTC configuration from global variable
303
- const config = __RTC_CONFIGURATION__;
304
- // Create new peer connection
305
- peerConnection = new RTCPeerConnection(config);
306
-
307
- // Set connection timeout (15 seconds)
308
- const connectionTimeout = setTimeout(() => {
309
- if (peerConnection && peerConnection.connectionState !== 'connected') {
310
- showError('Connection timeout. Please check your network and try again.');
311
- stop(); // Stop connection attempt
312
- }
313
- }, 15000);
314
-
315
- // Set warning for slow connection (5 seconds)
316
- const timeoutId = setTimeout(() => {
317
- const toast = document.getElementById('error-toast');
318
- toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
319
- toast.className = 'toast warning';
320
- toast.style.display = 'block';
321
-
322
- // Hide warning after 5 seconds
323
- setTimeout(() => {
324
- toast.style.display = 'none';
325
- }, 5000);
326
- }, 5000);
327
-
328
- try {
329
- // Request access to user's microphone
330
- const stream = await navigator.mediaDevices.getUserMedia({
331
- audio: true // Only request audio access
332
- });
333
-
334
- // Set up audio visualization
335
- setupAudioVisualization(stream);
336
-
337
- // Add audio tracks to WebRTC connection
338
- stream.getTracks().forEach(track => {
339
- peerConnection.addTrack(track, stream);
340
- });
341
-
342
- // Monitor connection state changes
343
- peerConnection.addEventListener('connectionstatechange', () => {
344
- // Log state changes
345
- console.log('connectionstatechange', peerConnection.connectionState);
346
-
347
- // Handle successful connection
348
- if (peerConnection.connectionState === 'connected') {
349
- clearTimeout(timeoutId);
350
- clearTimeout(connectionTimeout);
351
- const toast = document.getElementById('error-toast');
352
- toast.style.display = 'none';
353
- // Handle connection failures
354
- } else if (peerConnection.connectionState === 'failed' ||
355
- peerConnection.connectionState === 'disconnected' ||
356
- peerConnection.connectionState === 'closed') {
357
- showError('Connection lost. Please try again.');
358
- stop();
359
- }
360
- // Update button appearance
361
- updateButtonState();
362
- });
363
-
364
- // Create data channel for server messages
365
- const dataChannel = peerConnection.createDataChannel('text');
366
- dataChannel.onmessage = handleMessage; // Set message handler
367
-
368
- // Create connection offer
369
- const offer = await peerConnection.createOffer();
370
- // Set local description (our end of connection)
371
- await peerConnection.setLocalDescription(offer);
372
-
373
- // Wait for ICE gathering to complete (finding connection methods)
374
- await new Promise((resolve) => {
375
- if (peerConnection.iceGatheringState === "complete") {
376
- resolve(); // Already complete
377
- } else {
378
- // Function to check ICE gathering state
379
- const checkState = () => {
380
- if (peerConnection.iceGatheringState === "complete") {
381
- peerConnection.removeEventListener("icegatheringstatechange", checkState);
382
- resolve(); // Complete gathering
383
- }
384
- };
385
- // Listen for ICE gathering state changes
386
- peerConnection.addEventListener("icegatheringstatechange", checkState);
387
- }
388
- });
389
-
390
- // Generate random ID for this connection
391
- webrtc_id = Math.random().toString(36).substring(7);
392
-
393
- // Send connection offer to server
394
- const response = await fetch('/webrtc/offer', {
395
- method: 'POST',
396
- headers: { 'Content-Type': 'application/json' },
397
- body: JSON.stringify({
398
- sdp: peerConnection.localDescription.sdp, // Session description
399
- type: peerConnection.localDescription.type, // Offer type
400
- webrtc_id: webrtc_id // Unique connection ID
401
- })
402
- });
403
-
404
- // Parse server response
405
- const serverResponse = await response.json();
406
-
407
- // Handle server errors
408
- if (serverResponse.status === 'failed') {
409
- showError(serverResponse.meta.error === 'concurrency_limit_reached'
410
- ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
411
- : serverResponse.meta.error);
412
- stop();
413
- startButton.textContent = 'Start Recording';
414
- return;
415
- }
416
-
417
- // Complete connection with server's description
418
- await peerConnection.setRemoteDescription(serverResponse);
419
-
420
- // Create event source for receiving transcription results
421
- eventSource = new EventSource('/transcript?webrtc_id=' + webrtc_id);
422
- // Handle event source errors
423
- eventSource.onerror = (event) => {
424
- console.error("EventSource error:", event);
425
- showError("Transcription connection lost. Please try again.");
426
- };
427
- // Process transcription results as they arrive
428
- eventSource.addEventListener("output", (event) => {
429
- console.log("Received transcript chunk:", event.data);
430
- // Add text to display
431
- appendTranscript(event.data);
432
- //appendTranscriptSimple(event.data);
433
- });
434
- } catch (err) {
435
- // Handle any setup errors
436
- clearTimeout(timeoutId);
437
- console.error('Error setting up WebRTC:', err);
438
- showError('Failed to establish connection. Please try again.');
439
- stop();
440
- startButton.textContent = 'Start Recording';
441
- }
442
- }
443
-
444
- function appendTranscriptSimple(text) {
445
- const p = document.createElement('p');
446
- p.textContent = text;
447
- transcriptDiv.appendChild(p);
448
- transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
449
- }
450
-
451
- // Add transcription text to display
452
- function appendTranscript(text) {
453
- // Clean up text
454
- const formattedText = text.trim();
455
- if (!formattedText) return;
456
-
457
- const now = Date.now();
458
- const timeSinceLastUpdate = now - lastUpdateTime;
459
- lastUpdateTime = now;
460
-
461
- // Handle transcript display
462
- if (!currentParagraph) {
463
- // Create new paragraph
464
- currentParagraph = document.createElement('p');
465
- currentParagraph.classList.add('current');
466
- transcriptDiv.appendChild(currentParagraph);
467
- currentParagraph.textContent = formattedText;
468
- } else {
469
- // Get current text
470
- const currentText = currentParagraph.textContent;
471
-
472
- // Fix spacing issues by normalizing
473
- let cleanedText = formattedText;
474
-
475
- // 1. Check for simple word repetition - last word repeated
476
- const words = currentText.split(/\s+/);
477
- const lastWord = words[words.length - 1].replace(/[^\w]/g, '').toLowerCase();
478
-
479
- if (lastWord && lastWord.length > 2) {
480
- // Check if new text starts with the same word
481
- const regex = new RegExp(`^${lastWord}`, 'i');
482
- if (regex.test(cleanedText.replace(/[^\w]/g, ''))) {
483
- // Remove the first word if it's a duplicate
484
- cleanedText = cleanedText.replace(regex, '').trim();
485
- }
486
- }
487
-
488
- // 2. Add proper spacing
489
- let finalText = currentText;
490
-
491
- // Only add space if current text doesn't end with space or punctuation
492
- // and new text doesn't start with punctuation
493
- if (!/[\s.,!?]$/.test(finalText) && !/^[.,!?]/.test(cleanedText) && cleanedText) {
494
- finalText += ' ';
495
- }
496
-
497
- // 3. Add the cleaned text
498
- finalText += cleanedText;
499
-
500
- // 4. Fix any run-together words by adding spaces after punctuation
501
- finalText = finalText.replace(/([.,!?])([a-zA-Z])/g, '$1 $2');
502
-
503
- // Update the paragraph text
504
- currentParagraph.textContent = finalText;
505
- }
506
-
507
- // Create new paragraph on sentence end or pause
508
- if (/[.!?]$/.test(formattedText) || timeSinceLastUpdate > 5000) {
509
- // End current paragraph
510
- if (currentParagraph) {
511
- currentParagraph.classList.remove('current');
512
- }
513
-
514
- // Prepare for next paragraph
515
- currentParagraph = null;
516
- }
517
-
518
- // Limit number of displayed paragraphs
519
- const paragraphs = transcriptDiv.getElementsByTagName('p');
520
- while (paragraphs.length > 10) { // Keep last 10 paragraphs
521
- transcriptDiv.removeChild(paragraphs[0]);
522
- }
523
-
524
- // Scroll to show newest text
525
- requestAnimationFrame(() => {
526
- transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
527
- });
528
- }
529
-
530
- // Stop recording and clean up resources
531
- function stop() {
532
- // Stop audio visualization
533
- if (animationFrame) {
534
- cancelAnimationFrame(animationFrame);
535
- animationFrame = null;
536
- }
537
-
538
- // Pause audio processing
539
- if (audioContext) {
540
- audioContext.suspend();
541
- }
542
-
543
- // Stop all media tracks
544
- if (peerConnection) {
545
- const senders = peerConnection.getSenders();
546
- if (senders) {
547
- senders.forEach(sender => {
548
- if (sender.track) {
549
- sender.track.stop(); // Release microphone
550
- }
551
- });
552
- }
553
-
554
- // Close WebRTC connection
555
- peerConnection.close();
556
- peerConnection = null;
557
- }
558
-
559
- // Close transcription connection
560
- if (eventSource) {
561
- eventSource.close();
562
- eventSource = null;
563
- }
564
-
565
- // Reset audio level
566
- audioLevel = 0;
567
- // Update button display
568
- updateButtonState();
569
-
570
- // Ask about clearing transcript
571
- if (window.confirm('Clear transcript?')) {
572
- // Clear all transcript text
573
- transcriptDiv.innerHTML = '';
574
- currentParagraph = null;
575
- } else {
576
- // Just end current paragraph
577
- if (currentParagraph) {
578
- currentParagraph.classList.remove('current');
579
- currentParagraph = null;
580
- }
581
- }
582
-
583
- // Reset timestamp
584
- lastUpdateTime = Date.now();
585
- }
586
-
587
- // Clean up resources when page is closed
588
- window.addEventListener('beforeunload', () => {
589
- stop(); // Stop recording and release resources
590
- });
591
-
592
- // Handle start/stop button clicks
593
- startButton.addEventListener('click', () => {
594
- if (!isRecording) {
595
- // Start recording if not already recording
596
- setupWebRTC();
597
- } else {
598
- // Stop recording if currently recording
599
- stop();
600
- }
601
- });
602
-
603
- // Initialize UI when page loads
604
- document.addEventListener('DOMContentLoaded', () => {
605
- // Ensure all UI elements are visible
606
- const elementsToCheck = [
607
- transcriptDiv,
608
- startButton,
609
- document.getElementById('error-toast')
610
- ];
611
-
612
- // Set appropriate display for each element
613
- elementsToCheck.forEach(el => {
614
- if (el) {
615
- // Set appropriate display style based on element type
616
- el.style.display = el.tagName.toLowerCase() === 'button' ? 'block' :
617
- (el.id === 'transcript' ? 'block' : 'none');
618
- }
619
- });
620
-
621
- // Apply CSS variables to ensure theme is working
622
- document.body.style.backgroundColor = 'var(--background-dark)';
623
- document.body.style.color = 'var(--text-light)';
624
-
625
- // Force button colors for consistency
626
- startButton.style.backgroundColor = 'rgba(249, 164, 92, 1.0)';
627
- startButton.style.color = 'black';
628
- });
629
- </script>
630
- </body>
631
-
632
- </html>
main.py CHANGED
@@ -1,6 +1,5 @@
  import os
  import logging
- import uvicorn
  import json

  import gradio as gr
@@ -8,7 +7,7 @@ import numpy as np
  from dotenv import load_dotenv
  from fastapi import FastAPI
  from fastapi.responses import StreamingResponse, HTMLResponse
-
+ from fastapi.staticfiles import StaticFiles
  from fastrtc import (
      AdditionalOutputs,
      ReplyOnPause,
@@ -25,39 +24,46 @@ from transformers import (
  from transformers.utils import is_flash_attn_2_available

  from utils.logger_config import setup_logging
- from utils.device import get_device, get_torch_and_np_dtypes, cuda_version_check
+ from utils.device import get_device, get_torch_and_np_dtypes
  from utils.turn_server import get_rtc_credentials


  load_dotenv()
- setup_logging(level=logging.DEBUG)
+ setup_logging()
  logger = logging.getLogger(__name__)


- APP_MODE = os.getenv("APP_MODE", "deployed")
+ UI_MODE = os.getenv("UI_MODE", "fastapi").lower()   # gradio | fastapi
+ UI_TYPE = os.getenv("UI_TYPE", "base").lower()      # base | screen
+ APP_MODE = os.getenv("APP_MODE", "local").lower()   # local | deployed
  MODEL_ID = os.getenv("MODEL_ID", "openai/whisper-large-v3-turbo")
- UI_FILE = os.getenv("UI_FILE", "index.html")
+ LANGUAGE = os.getenv("LANGUAGE", "english").lower()


  device = get_device(force_cpu=False)
  torch_dtype, np_dtype = get_torch_and_np_dtypes(device, use_bfloat16=False)
  logger.info(f"Using device: {device}, torch_dtype: {torch_dtype}, np_dtype: {np_dtype}")

- cuda_version, device_name = cuda_version_check()
- logger.info(f"CUDA Version: {cuda_version}, GPU Device: {device_name}")

  attention = "flash_attention_2" if is_flash_attn_2_available() else "sdpa"
  logger.info(f"Using attention: {attention}")

  logger.info(f"Loading Whisper model: {MODEL_ID}")
- model = AutoModelForSpeechSeq2Seq.from_pretrained(
-     MODEL_ID,
-     torch_dtype=torch_dtype,
-     low_cpu_mem_usage=True,
-     use_safetensors=True,
-     attn_implementation=attention
- )
- model.to(device)
+ logger.info(f"Using language: {LANGUAGE}")
+
+ try:
+     model = AutoModelForSpeechSeq2Seq.from_pretrained(
+         MODEL_ID,
+         torch_dtype=torch_dtype,
+         low_cpu_mem_usage=True,
+         use_safetensors=True,
+         attn_implementation=attention
+     )
+     model.to(device)
+ except Exception as e:
+     logger.error(f"Error loading ASR model: {e}")
+     logger.error(f"Are you providing a valid model ID? {MODEL_ID}")
+     raise

  processor = AutoProcessor.from_pretrained(MODEL_ID)

@@ -83,11 +89,11 @@ async def transcribe(audio: tuple[int, np.ndarray]):

      outputs = transcribe_pipeline(
          audio_to_bytes(audio),
-         chunk_length_s=3,
+         chunk_length_s=6,
          batch_size=1,
          generate_kwargs={
              'task': 'transcribe',
-             'language': 'english',
+             'language': LANGUAGE,
          },
          #return_timestamps="word"
      )
@@ -102,7 +108,7 @@ stream = Stream(
          # Duration in seconds of audio chunks (default 0.6)
          audio_chunk_duration=0.6,
          # If the chunk has more than started_talking_threshold seconds of speech, the user started talking (default 0.2)
-         started_talking_threshold=0.2,
+         started_talking_threshold=0.1,
          # If, after the user started speaking, there is a chunk with less than speech_threshold seconds of speech, the user stopped speaking. (default 0.1)
          speech_threshold=0.1,
      ),
@@ -112,13 +118,13 @@ stream = Stream(
          # Final speech chunks shorter min_speech_duration_ms are thrown out (default 250)
          min_speech_duration_ms=250,
          # Max duration of speech chunks, longer will be split (default float('inf'))
-         max_speech_duration_s=3,
+         max_speech_duration_s=6,
          # Wait for ms at the end of each speech chunk before separating it (default 2000)
-         min_silence_duration_ms=2000,
+         min_silence_duration_ms=100,
          # Chunk size for VAD model. Can be 512, 1024, 1536 for 16k s.r. (default 1024)
          window_size_samples=1024,
          # Final speech chunks are padded by speech_pad_ms each side (default 400)
-         speech_pad_ms=400,
+         speech_pad_ms=200,
      ),
  ),
  # send-receive: bidirectional streaming (default)
@@ -135,11 +141,16 @@ stream = Stream(
  )

  app = FastAPI()
+ app.mount("/static", StaticFiles(directory="static"), name="static")
  stream.mount(app)

  @app.get("/")
  async def index():
-     html_content = open(UI_FILE).read()
+     if UI_TYPE == "base":
+         html_content = open("static/index.html").read()
+     else:
+         html_content = open("static/index-screen.html").read()
+
      rtc_config = get_rtc_credentials(provider="hf") if APP_MODE == "deployed" else None
      return HTMLResponse(content=html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config)))

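The index() route above injects the WebRTC configuration into the served page by plain string substitution of the __RTC_CONFIGURATION__ placeholder; the new static/index-screen.html (added below) assigns that value to window.__RTC_CONFIGURATION__ so the external static/client.js can read it, falling back to null when it is absent. A minimal sketch of the substitution, using a simplified stand-in template rather than the real file and assuming a local run where no TURN credentials are fetched:

# Minimal sketch of the substitution performed by index() above; the template
# string here is a simplified stand-in, not the actual static HTML file.
import json

template = "<script>var rtcConfig = __RTC_CONFIGURATION__;</script>"
rtc_config = None  # APP_MODE != "deployed", so get_rtc_credentials() is skipped
rendered = template.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
print(rendered)  # -> <script>var rtcConfig = null;</script>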
static/client.js ADDED
@@ -0,0 +1,479 @@
1
+ // Wait for the DOM to be fully loaded before running any code
2
+ document.addEventListener('DOMContentLoaded', function() {
3
+ // Global variables for WebRTC connection
4
+ let peerConnection; // Stores the WebRTC connection object for audio streaming
5
+ let webrtc_id; // A unique ID to identify this connection on the server
6
+ let audioContext, analyser, audioSource; // Audio processing objects for visualization
7
+ let audioLevel = 0; // Stores the current audio level (volume) from 0-1
8
+ let animationFrame; // Reference to the animation frame for audio visualization
9
+ let isRecording = false; // Tracks whether we're currently recording or not
10
+ let eventSource; // Object that receives transcription results from the server
11
+
12
+ // DOM element references
13
+ const startButton = document.getElementById('start-button'); // The button to start/stop recording
14
+ const transcriptDiv = document.getElementById('transcript'); // The container for transcription text
15
+
16
+ // Log debug info at start
17
+ console.log('DOM loaded. startButton:', startButton, 'transcriptDiv:', transcriptDiv);
18
+
19
+ // Variables for managing the transcript display
20
+ let currentParagraph = null; // Reference to the current paragraph being updated
21
+ let lastUpdateTime = Date.now(); // Timestamp of when we last updated the transcript
22
+
23
+ // Show error messages to the user in a toast notification
24
+ function showError(message) {
25
+ console.error('Error:', message);
26
+ const toast = document.getElementById('error-toast'); // Get the toast element
27
+ toast.textContent = message; // Set the error message
28
+ toast.style.display = 'block'; // Make the toast visible
29
+
30
+ // Hide toast after 5 seconds
31
+ setTimeout(() => {
32
+ toast.style.display = 'none'; // Hide the toast
33
+ }, 5000);
34
+ }
35
+
36
+ // Handle messages received from the server through WebRTC data channel
37
+ function handleMessage(event) {
38
+ // Parse JSON message
39
+ const eventJson = JSON.parse(event.data);
40
+ // Display errors to the user
41
+ if (eventJson.type === "error") {
42
+ showError(eventJson.message);
43
+ }
44
+ // Log all messages to console for debugging
45
+ console.log('Received message:', event.data);
46
+ }
47
+
48
+ // Update button appearance based on connection state
49
+ function updateButtonState() {
50
+ // If connecting, show spinner
51
+ if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
52
+ startButton.innerHTML = `
53
+ <div class="icon-with-spinner">
54
+ <div class="spinner"></div>
55
+ <span>Connecting...</span>
56
+ </div>
57
+ `;
58
+ isRecording = false; // Not recording while connecting
59
+ // If connected, show pulsing recording indicator
60
+ } else if (peerConnection && peerConnection.connectionState === 'connected') {
61
+ startButton.innerHTML = `
62
+ <div class="pulse-container">
63
+ <div class="pulse-circle"></div>
64
+ <span>Stop Recording</span>
65
+ </div>
66
+ `;
67
+ isRecording = true; // Set recording state to true
68
+ // Default state - ready to start
69
+ } else {
70
+ startButton.innerHTML = 'Start Recording';
71
+ isRecording = false; // Not recording when not connected
72
+ }
73
+ console.log('Button state updated. isRecording:', isRecording);
74
+ }
75
+
76
+ // Set up audio visualization to show when the user is speaking
77
+ function setupAudioVisualization(stream) {
78
+ // Create or resume the audio context
79
+ if (!audioContext) {
80
+ // Create new audio context with browser compatibility handling
81
+ audioContext = new (window.AudioContext || window.webkitAudioContext)();
82
+ } else {
83
+ // Resume context if it was suspended
84
+ if (audioContext.state === 'suspended') {
85
+ audioContext.resume();
86
+ }
87
+ }
88
+
89
+ // Create audio analyzer for processing audio data
90
+ analyser = audioContext.createAnalyser();
91
+ // Create media source from microphone stream
92
+ audioSource = audioContext.createMediaStreamSource(stream);
93
+ // Connect source to analyzer
94
+ audioSource.connect(analyser);
95
+ // Set FFT size (controls frequency data resolution)
96
+ analyser.fftSize = 64;
97
+ // Create array to store frequency data
98
+ const dataArray = new Uint8Array(analyser.frequencyBinCount);
99
+
100
+ // Function to continuously update audio level visualization
101
+ function updateAudioLevel() {
102
+ // Get audio frequency data
103
+ analyser.getByteFrequencyData(dataArray);
104
+ // Calculate average volume across all frequencies
105
+ const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
106
+ // Convert to 0-1 scale
107
+ audioLevel = average / 255;
108
+
109
+ // Update pulse circle size based on audio level
110
+ const pulseCircle = document.querySelector('.pulse-circle');
111
+ if (pulseCircle) {
112
+ pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
113
+ }
114
+
115
+ // Continue animation loop
116
+ animationFrame = requestAnimationFrame(updateAudioLevel);
117
+ }
118
+ // Start audio visualization loop
119
+ updateAudioLevel();
120
+ }
121
+
122
+ // Set up WebRTC connection for streaming audio to server
123
+ async function setupWebRTC() {
124
+ console.log('Setting up WebRTC connection...');
125
+
126
+ try {
127
+ // Get WebRTC configuration from global variable
128
+ const config = window.__RTC_CONFIGURATION__ || null;
129
+ console.log('WebRTC configuration:', config);
130
+
131
+ // Create new peer connection
132
+ peerConnection = new RTCPeerConnection(config);
133
+ console.log('Created peer connection:', peerConnection);
134
+
135
+ // Set connection timeout (15 seconds)
136
+ const connectionTimeout = setTimeout(() => {
137
+ if (peerConnection && peerConnection.connectionState !== 'connected') {
138
+ showError('Connection timeout. Please check your network and try again.');
139
+ stop(); // Stop connection attempt
140
+ }
141
+ }, 15000);
142
+
143
+ // Set warning for slow connection (5 seconds)
144
+ const timeoutId = setTimeout(() => {
145
+ const toast = document.getElementById('error-toast');
146
+ toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
147
+ toast.className = 'toast warning';
148
+ toast.style.display = 'block';
149
+
150
+ // Hide warning after 5 seconds
151
+ setTimeout(() => {
152
+ toast.style.display = 'none';
153
+ }, 5000);
154
+ }, 5000);
155
+
156
+ // Update button to show connecting state
157
+ updateButtonState();
158
+
159
+ // Request access to user's microphone
160
+ console.log('Requesting microphone access...');
161
+ const stream = await navigator.mediaDevices.getUserMedia({
162
+ audio: true // Only request audio access
163
+ });
164
+ console.log('Microphone access granted:', stream);
165
+
166
+ // Set up audio visualization
167
+ setupAudioVisualization(stream);
168
+
169
+ // Add audio tracks to WebRTC connection
170
+ stream.getTracks().forEach(track => {
171
+ peerConnection.addTrack(track, stream);
172
+ });
173
+ console.log('Added audio tracks to connection');
174
+
175
+ // Monitor connection state changes
176
+ peerConnection.addEventListener('connectionstatechange', () => {
177
+ // Log state changes
178
+ console.log('connectionstatechange', peerConnection.connectionState);
179
+
180
+ // Handle successful connection
181
+ if (peerConnection.connectionState === 'connected') {
182
+ clearTimeout(timeoutId);
183
+ clearTimeout(connectionTimeout);
184
+ const toast = document.getElementById('error-toast');
185
+ toast.style.display = 'none';
186
+ console.log('Connection established successfully');
187
+ // Handle connection failures
188
+ } else if (peerConnection.connectionState === 'failed' ||
189
+ peerConnection.connectionState === 'disconnected' ||
190
+ peerConnection.connectionState === 'closed') {
191
+ showError('Connection lost. Please try again.');
192
+ stop();
193
+ }
194
+ // Update button appearance
195
+ updateButtonState();
196
+ });
197
+
198
+ // Create data channel for server messages
199
+ const dataChannel = peerConnection.createDataChannel('text');
200
+ dataChannel.onmessage = handleMessage; // Set message handler
201
+ console.log('Created data channel');
202
+
203
+ // Create connection offer
204
+ console.log('Creating connection offer...');
205
+ const offer = await peerConnection.createOffer();
206
+ // Set local description (our end of connection)
207
+ await peerConnection.setLocalDescription(offer);
208
+ console.log('Local description set');
209
+
210
+ // Wait for ICE gathering to complete (finding connection methods)
211
+ console.log('Waiting for ICE gathering...');
212
+ await new Promise((resolve) => {
213
+ if (peerConnection.iceGatheringState === "complete") {
214
+ resolve(); // Already complete
215
+ } else {
216
+ // Function to check ICE gathering state
217
+ const checkState = () => {
218
+ if (peerConnection.iceGatheringState === "complete") {
219
+ peerConnection.removeEventListener("icegatheringstatechange", checkState);
220
+ resolve(); // Complete gathering
221
+ }
222
+ };
223
+ // Listen for ICE gathering state changes
224
+ peerConnection.addEventListener("icegatheringstatechange", checkState);
225
+ }
226
+ });
227
+ console.log('ICE gathering complete');
228
+
229
+ // Generate random ID for this connection
230
+ webrtc_id = Math.random().toString(36).substring(7);
231
+ console.log('Generated webrtc_id:', webrtc_id);
232
+
233
+ // Send connection offer to server
234
+ console.log('Sending offer to server...');
235
+ const response = await fetch('/webrtc/offer', {
236
+ method: 'POST',
237
+ headers: { 'Content-Type': 'application/json' },
238
+ body: JSON.stringify({
239
+ sdp: peerConnection.localDescription.sdp, // Session description
240
+ type: peerConnection.localDescription.type, // Offer type
241
+ webrtc_id: webrtc_id // Unique connection ID
242
+ })
243
+ });
244
+ console.log('Server responded to offer');
245
+
246
+ // Parse server response
247
+ const serverResponse = await response.json();
248
+ console.log('Server response:', serverResponse);
249
+
250
+ // Handle server errors
251
+ if (serverResponse.status === 'failed') {
252
+ showError(serverResponse.meta.error === 'concurrency_limit_reached'
253
+ ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
254
+ : serverResponse.meta.error);
255
+ stop();
256
+ startButton.textContent = 'Start Recording';
257
+ return;
258
+ }
259
+
260
+ // Complete connection with server's description
261
+ console.log('Setting remote description...');
262
+ await peerConnection.setRemoteDescription(serverResponse);
263
+ console.log('Remote description set');
264
+
265
+ // Create event source for receiving transcription results
266
+ console.log('Creating event source for transcription...');
267
+ eventSource = new EventSource('/transcript?webrtc_id=' + webrtc_id);
268
+ // Handle event source errors
269
+ eventSource.onerror = (event) => {
270
+ console.error("EventSource error:", event);
271
+ showError("Transcription connection lost. Please try again.");
272
+ };
273
+ // Process transcription results as they arrive
274
+ eventSource.addEventListener("output", (event) => {
275
+ console.log("Received transcript chunk:", event.data);
276
+ // Add text to display
277
+ appendTranscript(event.data);
278
+ });
279
+
280
+ console.log('WebRTC setup complete, waiting for connection...');
281
+ } catch (err) {
282
+ // Handle any setup errors
283
+ console.error('Error setting up WebRTC:', err);
284
+ showError('Failed to establish connection: ' + err.message);
285
+ stop();
286
+ startButton.textContent = 'Start Recording';
287
+ }
288
+ }
289
+
290
+ function appendTranscriptSimple(text) {
291
+ const p = document.createElement('p');
292
+ p.textContent = text;
293
+ transcriptDiv.appendChild(p);
294
+ transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
295
+ }
296
+
297
+ // Add transcription text to display
298
+ function appendTranscript(text) {
299
+ // Clean up text
300
+ const formattedText = text.trim();
301
+ if (!formattedText) return;
302
+
303
+ const now = Date.now();
304
+ const timeSinceLastUpdate = now - lastUpdateTime;
305
+ lastUpdateTime = now;
306
+
307
+ // Handle transcript display
308
+ if (!currentParagraph) {
309
+ // Create new paragraph
310
+ currentParagraph = document.createElement('p');
311
+ currentParagraph.classList.add('current');
312
+ transcriptDiv.appendChild(currentParagraph);
313
+ currentParagraph.textContent = formattedText;
314
+ } else {
315
+ // Get current text
316
+ const currentText = currentParagraph.textContent;
317
+
318
+ // Fix spacing issues by normalizing
319
+ let cleanedText = formattedText;
320
+
321
+ // 1. Check for simple word repetition - last word repeated
322
+ const words = currentText.split(/\s+/);
323
+ const lastWord = words[words.length - 1].replace(/[^\w]/g, '').toLowerCase();
324
+
325
+ if (lastWord && lastWord.length > 2) {
326
+ // Check if new text starts with the same word
327
+ const regex = new RegExp(`^${lastWord}`, 'i');
328
+ if (regex.test(cleanedText.replace(/[^\w]/g, ''))) {
329
+ // Remove the first word if it's a duplicate
330
+ cleanedText = cleanedText.replace(regex, '').trim();
331
+ }
332
+ }
333
+
334
+ // 2. Add proper spacing
335
+ let finalText = currentText;
336
+
337
+ // Only add space if current text doesn't end with space or punctuation
338
+ // and new text doesn't start with punctuation
339
+ if (!/[\s.,!?]$/.test(finalText) && !/^[.,!?]/.test(cleanedText) && cleanedText) {
340
+ finalText += ' ';
341
+ }
342
+
343
+ // 3. Add the cleaned text
344
+ finalText += cleanedText;
345
+
346
+ // 4. Fix any run-together words by adding spaces after punctuation
347
+ finalText = finalText.replace(/([.,!?])([a-zA-Z])/g, '$1 $2');
348
+
349
+ // Update the paragraph text
350
+ currentParagraph.textContent = finalText;
351
+ }
352
+
353
+ // Create new paragraph on sentence end or pause
354
+ if (/[.!?]$/.test(formattedText) || timeSinceLastUpdate > 5000) {
355
+ // End current paragraph
356
+ if (currentParagraph) {
357
+ currentParagraph.classList.remove('current');
358
+ }
359
+
360
+ // Prepare for next paragraph
361
+ currentParagraph = null;
362
+ }
363
+
364
+ // Limit number of displayed paragraphs
365
+ const paragraphs = transcriptDiv.getElementsByTagName('p');
366
+ while (paragraphs.length > 10) { // Keep last 10 paragraphs
367
+ transcriptDiv.removeChild(paragraphs[0]);
368
+ }
369
+
370
+ // Scroll to show newest text
371
+ requestAnimationFrame(() => {
372
+ transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
373
+ });
374
+ }
375
+
376
+ // Stop recording and clean up resources
377
+ function stop() {
378
+ console.log('Stopping recording...');
379
+ // Stop audio visualization
380
+ if (animationFrame) {
381
+ cancelAnimationFrame(animationFrame);
382
+ animationFrame = null;
383
+ }
384
+
385
+ // Pause audio processing
386
+ if (audioContext) {
387
+ audioContext.suspend();
388
+ }
389
+
390
+ // Stop all media tracks
391
+ if (peerConnection) {
392
+ const senders = peerConnection.getSenders();
393
+ if (senders) {
394
+ senders.forEach(sender => {
395
+ if (sender.track) {
396
+ sender.track.stop(); // Release microphone
397
+ }
398
+ });
399
+ }
400
+
401
+ // Close WebRTC connection
402
+ peerConnection.close();
403
+ peerConnection = null;
404
+ }
405
+
406
+ // Close transcription connection
407
+ if (eventSource) {
408
+ eventSource.close();
409
+ eventSource = null;
410
+ }
411
+
412
+ // Reset audio level
413
+ audioLevel = 0;
414
+ // Update button display
415
+ updateButtonState();
416
+
417
+ // Ask about clearing transcript
418
+ if (window.confirm('Clear transcript?')) {
419
+ // Clear all transcript text
420
+ transcriptDiv.innerHTML = '';
421
+ currentParagraph = null;
422
+ } else {
423
+ // Just end current paragraph
424
+ if (currentParagraph) {
425
+ currentParagraph.classList.remove('current');
426
+ currentParagraph = null;
427
+ }
428
+ }
429
+
430
+ // Reset timestamp
431
+ lastUpdateTime = Date.now();
432
+ console.log('Recording stopped');
433
+ }
434
+
435
+ // Clean up resources when page is closed
436
+ window.addEventListener('beforeunload', () => {
437
+ stop(); // Stop recording and release resources
438
+ });
439
+
440
+ // Handle start/stop button clicks
441
+ startButton.addEventListener('click', () => {
442
+ console.log('Start button clicked. isRecording:', isRecording);
443
+ if (!isRecording) {
444
+ // Start recording if not already recording
445
+ setupWebRTC();
446
+ } else {
447
+ // Stop recording if currently recording
448
+ stop();
449
+ }
450
+ });
451
+
452
+ // Initialize UI when page loads
453
+ console.log('Initializing UI...');
454
+ // Ensure all UI elements are visible
455
+ const elementsToCheck = [
456
+ transcriptDiv,
457
+ startButton,
458
+ document.getElementById('error-toast')
459
+ ];
460
+
461
+ // Set appropriate display for each element
462
+ elementsToCheck.forEach(el => {
463
+ if (el) {
464
+ // Set appropriate display style based on element type
465
+ el.style.display = el.tagName.toLowerCase() === 'button' ? 'block' :
466
+ (el.id === 'transcript' ? 'block' : 'none');
467
+ }
468
+ });
469
+
470
+ // Apply CSS variables to ensure theme is working
471
+ document.body.style.backgroundColor = 'var(--background-dark)';
472
+ document.body.style.color = 'var(--text-light)';
473
+
474
+ // Force button colors for consistency
475
+ startButton.style.backgroundColor = 'rgba(249, 164, 92, 1.0)';
476
+ startButton.style.color = 'black';
477
+
478
+ console.log('UI initialization complete');
479
+ });
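client.js above subscribes to the transcript stream with an EventSource on /transcript?webrtc_id=<id> and listens for "output" events. A hypothetical debugging aid (not part of the commit) that tails the same server-sent-events endpoint from Python; the host, port, and use of the requests package are assumptions:

# Hypothetical debugging aid: tail the SSE transcript stream for a known
# webrtc_id. Assumes the app is reachable on localhost:8000 and that the
# `requests` package is installed.
import requests

webrtc_id = "abc123"  # placeholder: must match an active connection's ID
url = f"http://localhost:8000/transcript?webrtc_id={webrtc_id}"
with requests.get(url, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        # SSE frames arrive as "event: output" / "data: <text>" line pairs
        if line and line.startswith("data:"):
            print(line[len("data:"):].strip())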
static/index-screen.html ADDED
@@ -0,0 +1,191 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+
4
+ <head>
5
+ <meta charset="UTF-8">
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
+ <title>Real-time Whisper Transcription</title>
8
+ <script>
9
+ window.__RTC_CONFIGURATION__ = __RTC_CONFIGURATION__;
10
+ </script>
11
+ <style>
12
+ :root {
13
+ --background-dark: #000000;
14
+ --text-light: #ffffff;
15
+ }
16
+
17
+ body {
18
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
19
+ margin: 0; /* Removes default margin */
20
+ padding: 0; /* Removes default padding */
21
+ background-color: var(--background-dark); /* Sets background to black */
22
+ color: var(--text-light); /* Sets text to white */
23
+ min-height: 100vh; /* Ensures page fills entire viewport height */
24
+ }
25
+
26
+ /* Hide the header in presentation mode */
27
+ .hero {
28
+ display: none; /* Hides the hero section completely */
29
+ }
30
+
31
+ .container {
32
+ max-width: 100%; /* Makes container full width */
33
+ margin: 0; /* Removes margin */
34
+ padding: 1rem; /* Adds small padding all around */
35
+ }
36
+
37
+ /* Base styling for transcript container */
38
+ .transcript-container {
39
+ height: 90vh; /* Sets height to 90% of viewport height */
40
+ border: none; /* Removes border */
41
+ padding: 2rem; /* Adds generous padding inside */
42
+ background: var(--background-dark); /* Ensures background is black */
43
+ color: var(--text-light); /* Ensures text is white */
44
+ overflow-y: auto; /* Enables vertical scrolling when content overflows */
45
+ margin-bottom: 0; /* Removes bottom margin */
46
+ display: block; /* Makes element a block to take full width */
47
+ width: 100%; /* Sets width to 100% */
+ }
+
+ /* Styling for transcript paragraphs */
+ .transcript-container p {
+ margin: 0.5rem 0; /* Small vertical margin between paragraphs */
+ padding: 0.5rem 0; /* Small vertical padding within paragraphs */
+ background: transparent; /* Transparent background (no highlighting) */
+ border-radius: 0; /* No rounded corners */
+ line-height: 1.6; /* Increases line spacing for readability */
+ font-size: 3.5rem; /* rem means relative to the root font size */
+ font-weight: 500; /* 500 = medium weight, 700 = bold */
+ max-width: 98%; /* Full width within container */
+ white-space: normal; /* Allows text to wrap normally */
+ word-wrap: break-word; /* Prevents overflow of long words */
+ color: white; /* Explicitly sets text color to white */
+ display: block; /* Each paragraph takes full width */
+ }
+
+ /* Current paragraph styling - slightly brighter for emphasis */
+ .transcript-container p.current {
+ background: transparent; /* No background color */
+ color: rgba(255, 255, 255, 1.0); /* Full brightness white for current text */
+ }
+
+ /* Ensure all paragraphs have full opacity (keeps history visible) */
+ .transcript-container p:nth-last-child(n+4) {
+ opacity: 1.0; /* Shows all paragraphs at full opacity */
+ }
+
+ /* Controls for starting/stopping transcription */
+ .controls {
+ position: fixed; /* Fixes controls to viewport */
+ bottom: 2rem; /* Positions 2rem from bottom */
+ right: 2rem; /* Positions 2rem from right */
+ margin: 0; /* No margin */
+ opacity: 0.8; /* Slightly transparent when not hovered */
+ transition: opacity 0.3s ease; /* Smooth transition for opacity changes */
+ z-index: 1000; /* Ensures controls appear above other elements */
+ }
+
+ .controls:hover {
+ opacity: 1; /* Full opacity on hover */
+ }
+
+ /* Button styling - orange with black text for good contrast */
+ button {
+ background: rgba(249, 164, 92, 1.0); /* Solid orange background */
+ backdrop-filter: blur(5px); /* Blur effect for elements behind */
+ font-size: 1.2rem; /* Large text */
+ min-width: 160px; /* Minimum width for button */
+ padding: 15px 30px; /* Generous padding inside button */
+ color: black !important; /* Forces black text color */
+ font-weight: bold; /* Bold text for better visibility */
+ border: 2px solid rgba(255, 255, 255, 0.2); /* Subtle border */
+ border-radius: 8px; /* Rounded corners */
+ cursor: pointer; /* Shows pointer cursor on hover */
+ transition: all 0.2s ease; /* Smooth transition for hover effects */
+ display: block; /* Makes button take up full width */
+ }
+
+ button:hover {
+ background: rgba(249, 164, 92, 0.9); /* Slightly more transparent on hover */
+ transform: translateY(-2px); /* Slight upward movement on hover */
+ }
+
+ /* Spinner animation for loading state */
+ .icon-with-spinner .spinner {
+ border: 3px solid black; /* Spinner border */
+ border-top: 3px solid transparent; /* Transparent top creates spinning effect */
+ border-radius: 50%; /* Makes it circular */
+ width: 24px; /* Width of spinner */
+ height: 24px; /* Height of spinner */
+ animation: spin 1s linear infinite; /* Animation for spinning effect */
+ }
+
+ @keyframes spin {
+ 0% { transform: rotate(0deg); } /* Starting rotation */
+ 100% { transform: rotate(360deg); } /* Full 360° rotation */
+ }
+
+ /* Recording indicator pulse animation */
+ .pulse-circle {
+ display: inline-block; /* Allows other elements inline */
+ width: 12px; /* Width of pulse circle */
+ height: 12px; /* Height of pulse circle */
+ border-radius: 50%; /* Makes it circular */
+ background-color: red; /* Red color for recording indicator */
+ margin-right: 8px; /* Space to right of circle */
+ animation: pulse 1.5s ease infinite; /* Continuous pulsing animation */
+ }
+
+ @keyframes pulse {
+ 0% { transform: scale(0.95); opacity: 0.7; } /* Slightly smaller and transparent */
+ 50% { transform: scale(1.1); opacity: 1; } /* Larger and fully opaque */
+ 100% { transform: scale(0.95); opacity: 0.7; } /* Back to starting state */
+ }
+
+ /* Custom scrollbar styling */
+ .transcript-container::-webkit-scrollbar {
+ width: 8px; /* Width of scrollbar */
+ }
+
+ .transcript-container::-webkit-scrollbar-track {
+ background: var(--background-dark); /* Black scrollbar track */
+ }
+
+ .transcript-container::-webkit-scrollbar-thumb {
+ background: rgba(249, 164, 92, 0.3); /* Semi-transparent orange scrollbar thumb */
+ border-radius: 4px; /* Rounded corners on scrollbar thumb */
+ }
+
+ /* Error toast styling */
+ .toast {
+ background: rgba(0, 0, 0, 0.8); /* Semi-transparent black background */
+ backdrop-filter: blur(5px); /* Blur effect behind toast */
+ color: var(--text-light); /* White text */
+ font-size: 1.2rem; /* Large text size */
+ }
+ </style>
+ </head>
+
+ <body>
+ <!-- Error message container that slides in when needed -->
+ <div id="error-toast" class="toast"></div>
+ <!-- Header section (hidden in presentation mode) -->
+ <div class="hero">
+ <h1>Real-time Transcription</h1>
+ <p>Powered by FastRTC and Local Whisper 🤗</p>
+ </div>
+
+ <!-- Main content container -->
+ <div class="container">
+ <!-- Container for transcript text -->
+ <div class="transcript-container" id="transcript"></div>
+ <!-- Controls for starting/stopping recording -->
+ <div class="controls">
+ <button id="start-button">Start Recording</button>
+ </div>
+ </div>
+
+ <script src="/static/client.js"></script>
+ </body>
+
+ </html>
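
Note on serving the moved files (not part of the diff): both pages now load their logic from /static/client.js instead of an inline script, so the server has to expose the static/ directory. Below is a minimal sketch, assuming a FastAPI-based server; the names app and index and the exact routes are illustrative assumptions, not taken from this commit.

# Hypothetical sketch, assuming a FastAPI server; not this project's actual code.
from pathlib import Path

from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles

app = FastAPI()

# Expose static/client.js, static/index.html, static/index-screen.html at /static/...
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get("/")
async def index() -> HTMLResponse:
    # Serve the relocated page; any server-side placeholders (for example an
    # RTC configuration handed to the client) would be substituted here.
    html = Path("static/index.html").read_text()
    return HTMLResponse(content=html)
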
index.html β†’ static/index.html RENAMED
@@ -188,8 +188,8 @@
  <!-- Add toast element after body opening tag -->
  <div id="error-toast" class="toast"></div>
  <div class="hero">
- <h1>PyCon Italia 2025 Real-time Transcription</h1>
- <p>Powered by FastRTC and Local Whisper</p>
+ <h1>Real-time Transcription</h1>
+ <p>Powered by FastRTC and Local Whisper 🤗</p>
  </div>

  <div class="container">
@@ -199,231 +199,7 @@
  </div>
  </div>

- <script>
- let peerConnection;
- let webrtc_id;
- let audioContext, analyser, audioSource;
- let audioLevel = 0;
- let animationFrame;
- let eventSource;
-
- const startButton = document.getElementById('start-button');
- const transcriptDiv = document.getElementById('transcript');
-
- function showError(message) {
- const toast = document.getElementById('error-toast');
- toast.textContent = message;
- toast.style.display = 'block';
-
- // Hide toast after 5 seconds
- setTimeout(() => {
- toast.style.display = 'none';
- }, 5000);
- }
-
- function handleMessage(event) {
- // Handle any WebRTC data channel messages if needed
- const eventJson = JSON.parse(event.data);
- if (eventJson.type === "error") {
- showError(eventJson.message);
- }
- console.log('Received message:', event.data);
- }
-
- function updateButtonState() {
- if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
- startButton.innerHTML = `
- <div class="icon-with-spinner">
- <div class="spinner"></div>
- <span>Connecting...</span>
- </div>
- `;
- } else if (peerConnection && peerConnection.connectionState === 'connected') {
- startButton.innerHTML = `
- <div class="pulse-container">
- <div class="pulse-circle"></div>
- <span>Stop Recording</span>
- </div>
- `;
- } else {
- startButton.innerHTML = 'Start Recording';
- }
- }
-
- function setupAudioVisualization(stream) {
- audioContext = new (window.AudioContext || window.webkitAudioContext)();
- analyser = audioContext.createAnalyser();
- audioSource = audioContext.createMediaStreamSource(stream);
- audioSource.connect(analyser);
- analyser.fftSize = 64;
- const dataArray = new Uint8Array(analyser.frequencyBinCount);
-
- function updateAudioLevel() {
- analyser.getByteFrequencyData(dataArray);
- const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
- audioLevel = average / 255;
-
- const pulseCircle = document.querySelector('.pulse-circle');
- if (pulseCircle) {
- pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
- }
-
- animationFrame = requestAnimationFrame(updateAudioLevel);
- }
- updateAudioLevel();
- }
-
- async function setupWebRTC() {
- const config = __RTC_CONFIGURATION__;
- peerConnection = new RTCPeerConnection(config);
-
- const timeoutId = setTimeout(() => {
- const toast = document.getElementById('error-toast');
- toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
- toast.className = 'toast warning';
- toast.style.display = 'block';
-
- // Hide warning after 5 seconds
- setTimeout(() => {
- toast.style.display = 'none';
- }, 5000);
- }, 5000);
-
- try {
- const stream = await navigator.mediaDevices.getUserMedia({
- audio: true
- });
-
- setupAudioVisualization(stream);
-
- stream.getTracks().forEach(track => {
- peerConnection.addTrack(track, stream);
- });
-
- // Add connection state change listener
- peerConnection.addEventListener('connectionstatechange', () => {
- console.log('connectionstatechange', peerConnection.connectionState);
- if (peerConnection.connectionState === 'connected') {
- clearTimeout(timeoutId);
- const toast = document.getElementById('error-toast');
- toast.style.display = 'none';
- }
- updateButtonState();
- });
-
- // Create data channel for messages
- const dataChannel = peerConnection.createDataChannel('text');
- dataChannel.onmessage = handleMessage;
-
- // Create and send offer
- const offer = await peerConnection.createOffer();
- await peerConnection.setLocalDescription(offer);
-
- await new Promise((resolve) => {
- if (peerConnection.iceGatheringState === "complete") {
- resolve();
- } else {
- const checkState = () => {
- if (peerConnection.iceGatheringState === "complete") {
- peerConnection.removeEventListener("icegatheringstatechange", checkState);
- resolve();
- }
- };
- peerConnection.addEventListener("icegatheringstatechange", checkState);
- }
- });
-
- webrtc_id = Math.random().toString(36).substring(7);
-
- const response = await fetch('/webrtc/offer', {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- sdp: peerConnection.localDescription.sdp,
- type: peerConnection.localDescription.type,
- webrtc_id: webrtc_id
- })
- });
-
- const serverResponse = await response.json();
-
- if (serverResponse.status === 'failed') {
- showError(serverResponse.meta.error === 'concurrency_limit_reached'
- ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
- : serverResponse.meta.error);
- stop();
- startButton.textContent = 'Start Recording';
- return;
- }
-
- await peerConnection.setRemoteDescription(serverResponse);
-
- // Create event stream to receive transcripts
- eventSource = new EventSource('/transcript?webrtc_id=' + webrtc_id);
- eventSource.addEventListener("output", (event) => {
- appendTranscript(event.data);
- });
- } catch (err) {
- clearTimeout(timeoutId);
- console.error('Error setting up WebRTC:', err);
- showError('Failed to establish connection. Please try again.');
- stop();
- startButton.textContent = 'Start Recording';
- }
- }
-
- function appendTranscript(text) {
- const p = document.createElement('p');
- p.textContent = text;
- transcriptDiv.appendChild(p);
- transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
- }
-
- function stop() {
- if (animationFrame) {
- cancelAnimationFrame(animationFrame);
- }
- if (audioContext) {
- audioContext.close();
- audioContext = null;
- analyser = null;
- audioSource = null;
- }
- if (peerConnection) {
- if (peerConnection.getTransceivers) {
- peerConnection.getTransceivers().forEach(transceiver => {
- if (transceiver.stop) {
- transceiver.stop();
- }
- });
- }
-
- if (peerConnection.getSenders) {
- peerConnection.getSenders().forEach(sender => {
- if (sender.track && sender.track.stop) sender.track.stop();
- });
- }
-
- peerConnection.close();
- peerConnection = null;
- }
- // Close EventSource connection
- if (eventSource) {
- eventSource.close();
- eventSource = null;
- }
- audioLevel = 0;
- updateButtonState();
- }
-
- startButton.addEventListener('click', () => {
- if (startButton.textContent === 'Start Recording') {
- setupWebRTC();
- } else {
- stop();
- }
- });
- </script>
+ <script src="/static/client.js"></script>
  </body>

  </html>
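
Note on the transcript stream (not part of the diff): the inline script removed above, which presumably now lives in static/client.js, receives transcripts over server-sent events. It opens EventSource('/transcript?webrtc_id=...') and listens for named "output" events. Below is a minimal sketch of a compatible /transcript endpoint, assuming FastAPI and an in-memory queue per connection; transcript_queues and the queue plumbing are illustrative assumptions, not this app's actual implementation.

# Hypothetical sketch of an SSE endpoint matching the removed client code;
# the per-connection queue is an assumption, not this project's implementation.
import asyncio
from collections import defaultdict

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

# One queue of transcript strings per webrtc_id, filled elsewhere by the
# transcription pipeline.
transcript_queues: dict[str, asyncio.Queue] = defaultdict(asyncio.Queue)


@app.get("/transcript")
async def transcript(webrtc_id: str) -> StreamingResponse:
    queue = transcript_queues[webrtc_id]

    async def event_stream():
        while True:
            text = await queue.get()
            # The client subscribes to the named "output" event:
            # eventSource.addEventListener("output", ...)
            yield f"event: output\ndata: {text}\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")
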