freddyaboulton HF Staff committed on
Commit
ee59b20
·
verified ·
1 Parent(s): 4b4bdb0

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +11 -10
app.py CHANGED
@@ -8,7 +8,13 @@ import numpy as np
8
  from dotenv import load_dotenv
9
  from elevenlabs import ElevenLabs
10
  from fastapi.responses import HTMLResponse, StreamingResponse
11
- from fastrtc import AdditionalOutputs, ReplyOnPause, Stream, get_twilio_turn_credentials
 
 
 
 
 
 
12
  from fastrtc.utils import aggregate_bytes_to_16bit, audio_to_bytes
13
  from gradio.utils import get_space
14
  from groq import Groq
@@ -22,6 +28,8 @@ tts_client = ElevenLabs(api_key=os.environ["ELEVENLABS_API_KEY"])
22
 
23
  curr_dir = Path(__file__).parent
24
 
 
 
25
 
26
  def response(
27
  audio: tuple[int, np.ndarray],
@@ -49,15 +57,8 @@ def response(
49
  )
50
  chatbot.append({"role": "assistant", "content": response_text})
51
  yield AdditionalOutputs(chatbot)
52
- iterator = tts_client.text_to_speech.convert_as_stream(
53
- text=response_text,
54
- voice_id="JBFqnCBsd6RMkjVDRZzb",
55
- model_id="eleven_multilingual_v2",
56
- output_format="pcm_24000",
57
- )
58
- for chunk in aggregate_bytes_to_16bit(iterator):
59
- audio_array = np.frombuffer(chunk, dtype=np.int16).reshape(1, -1)
60
- yield (24000, audio_array, "mono")
61
 
62
 
63
  chatbot = gr.Chatbot(type="messages")
 
8
  from dotenv import load_dotenv
9
  from elevenlabs import ElevenLabs
10
  from fastapi.responses import HTMLResponse, StreamingResponse
11
+ from fastrtc import (
12
+ AdditionalOutputs,
13
+ ReplyOnPause,
14
+ Stream,
15
+ get_twilio_turn_credentials,
16
+ get_tts_model,
17
+ )
18
  from fastrtc.utils import aggregate_bytes_to_16bit, audio_to_bytes
19
  from gradio.utils import get_space
20
  from groq import Groq
 
28
 
29
  curr_dir = Path(__file__).parent
30
 
31
+ tts_model = get_tts_model()
32
+
33
 
34
  def response(
35
  audio: tuple[int, np.ndarray],
 
57
  )
58
  chatbot.append({"role": "assistant", "content": response_text})
59
  yield AdditionalOutputs(chatbot)
60
+ for chunk in tts_model.stream_tts_sync(response_text):
61
+ yield chunk
 
 
 
 
 
 
 
62
 
63
 
64
  chatbot = gr.Chatbot(type="messages")