mrprimenotes committed
Commit 6c9dde7 · verified · 1 Parent(s): 19a5154

Create app.py

Files changed (1)
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
+ import time
+ import numpy as np
+ import gradio as gr
+ import random
+
+ from fastrtc import AdditionalOutputs, WebRTC, ReplyOnPause
+
+ possible_responses = [
+     "hello",
+     "hi",
+     "how's it going?",
+     "what's up?",
+     "how's your day?",
+     "how are you?",
+     "what's your name?",
+     "where are you from?",
+     "what do you do?",
+     "what's your favorite color?",
+     "what's your favorite food?",
+     "what's your favorite movie?",
+ ]
+
+
+ def transcribe(audio: tuple[int, np.ndarray]):
+     time.sleep(1)
+     transformers_convo = [
+         {"role": "assistant", "content": random.choice(possible_responses)}
+     ]
+     yield AdditionalOutputs(transformers_convo)
+
+
+ with gr.Blocks() as demo:
+     gr.HTML(
+         """
+         TEST
+         """
+     )
+     with gr.Row():
+         with gr.Column():
+             audio = WebRTC(
+                 label="Stream",
+                 mode="send",
+                 modality="audio",
+                 rtc_configuration={
+                     "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}],
+                     "iceTransportPolicy": "all",
+                     "iceCandidatePoolSize": 10,
+                     "bundlePolicy": "max-bundle",
+                     "rtcpMuxPolicy": "require",
+                     "sdpSemantics": "unified-plan",
+                 },
+                 server_rtc_configuration={
+                     "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
+                 },
+             )
+         with gr.Column():
+             transcript = gr.Chatbot(label="transcript", type="messages")
+
+     audio.stream(
+         ReplyOnPause(transcribe), inputs=[audio], outputs=[audio], time_limit=180
+     )
+     def handle_additional_outputs(outputs):
+         print(f"outputs: {outputs}")
+         return outputs
+
+     audio.on_additional_outputs(
+         fn=handle_additional_outputs,
+         outputs=[transcript],
+         concurrency_limit=10,
+         queue=False,
+         show_progress="hidden",
+     )
+ demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
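
For reference, a minimal standalone sketch (not part of the commit) of the data flow in the diff above: transcribe() yields an AdditionalOutputs wrapper around a chat-message list, which audio.on_additional_outputs later routes to the transcript Chatbot. The condensed copy of the handler, the 16 kHz sample rate, and the silent buffer below are illustrative assumptions.

import random
import time

import numpy as np
from fastrtc import AdditionalOutputs


def transcribe(audio: tuple[int, np.ndarray]):
    # Condensed version of the handler from app.py above.
    time.sleep(1)
    yield AdditionalOutputs(
        [{"role": "assistant", "content": random.choice(["hello", "hi"])}]
    )


# Assumed test input: 1 second of silence at 16 kHz, matching the
# (sample_rate, samples) tuple shape the handler expects.
fake_audio = (16000, np.zeros(16000, dtype=np.int16))

for extra in transcribe(fake_audio):
    # Each yielded AdditionalOutputs carries the message list that the
    # on_additional_outputs handler returns to the Chatbot component.
    print(extra)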