Spaces: Running on Zero
Commit · 840333c
1 Parent(s): 82022e9
Batched code
app.py CHANGED
@@ -224,7 +224,7 @@ def generate_base(subject, setting):
     speech_output = model.generate(input_ids=description_tokens, prompt_input_ids=story_tokens)
     speech_output = [output.cpu().numpy() for output in speech_output]
     gr.Info("Generated Audio")
-    return None, None, {"audio": speech_output, "text":
+    return None, None, {"audio": speech_output, "text": model_input_tokens}

 def stream_audio(state):
     speech_output = state["audio"]
@@ -237,7 +237,7 @@ def stream_audio(state):
     # print(f"i, j, time: {i}, {j} {datetime.datetime.now()}")
     print(f"Sample of length: {round(new_audio.shape[0] / sampling_rate, 2)} seconds")
     story += f"{sentence}\n"
-    yield story,
+    yield story, numpy_to_mp3(new_audio, sampling_rate=sampling_rate)

     # BATCH_SIZE = 4
     # for i in range(0, len(model_input), BATCH_SIZE):
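The second hunk changes the generator so that each step yields the accumulated story text together with an MP3-encoded audio chunk. The numpy_to_mp3 helper is defined elsewhere in app.py and is not shown in this diff; purely as an illustration of what such a conversion could look like, here is a minimal sketch that assumes pydub (with ffmpeg available) and 16-bit mono PCM as the intermediate format:

import io

import numpy as np
from pydub import AudioSegment


def numpy_to_mp3(audio_array, sampling_rate):
    # Hypothetical sketch, not the app's actual helper:
    # encode a mono numpy waveform as MP3 bytes via pydub/ffmpeg.
    if np.issubdtype(audio_array.dtype, np.floating):
        # Scale float audio (roughly in [-1, 1]) to 16-bit PCM.
        peak = max(np.max(np.abs(audio_array)), 1e-8)
        audio_array = (audio_array / peak * 32767).astype(np.int16)
    segment = AudioSegment(
        data=audio_array.tobytes(),
        frame_rate=sampling_rate,
        sample_width=audio_array.dtype.itemsize,  # 2 bytes for int16
        channels=1,
    )
    buffer = io.BytesIO()
    segment.export(buffer, format="mp3")
    return buffer.getvalue()

Yielding encoded bytes rather than a raw (sampling_rate, array) tuple keeps each streamed chunk small; whether bytes are accepted here depends on how the gr.Audio output component in the app is configured.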