Commit eb54e69 · Parent: e09206d
Now compatible with new inference pipeline
app.py CHANGED
@@ -39,7 +39,8 @@ def main(reference_paths, text_prompt, denoise, avg_style, stabilize):
     }
 
     with torch.no_grad():
-
+        styles = model.get_styles(speakers, denoise, avg_style)
+        r = model.generate(text_prompt, styles, stabilize, 18, "[id_1]")
         r = r / np.abs(r).max()
 
     sf.write("output.wav", r, samplerate=24000)
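To show the call shape the new pipeline expects, here is a self-contained sketch with a stub model standing in for the real checkpoint. Only the `get_styles`/`generate` calls and the normalize-and-write step come from this diff; the stub's internals and the shape of `speakers` are assumptions.

```python
# Sketch only: a stand-in model with the same call shape as the new pipeline
# (get_styles, then generate). Everything inside the stub is a placeholder.
import numpy as np
import soundfile as sf
import torch


class StubModel:
    def get_styles(self, speakers, denoise, avg_style):
        # Real pipeline: extract (and optionally denoise/average) style
        # embeddings from the reference clips. Here: one dummy vector per id.
        return {spk: np.zeros(128, dtype=np.float32) for spk in speakers}

    def generate(self, text_prompt, styles, stabilize, steps, speaker_tag):
        # Real pipeline: synthesize speech for `text_prompt` in the style of
        # `speaker_tag`. Here: one second of a quiet 220 Hz tone at 24 kHz.
        t = np.arange(24000) / 24000
        return 0.1 * np.sin(2 * np.pi * 220 * t).astype(np.float32)


model = StubModel()
speakers = {"id_1": ["ref1.wav"]}  # assumed shape of the speakers mapping

with torch.no_grad():
    styles = model.get_styles(speakers, denoise=0.6, avg_style=True)
    r = model.generate("Hello there.", styles, True, 18, "[id_1]")
    r = r / np.abs(r).max()  # peak-normalize to [-1, 1]

sf.write("output.wav", r, samplerate=24000)
```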
@@ -68,7 +69,7 @@ def on_file_upload(file_list):
 
 def gen_example(reference_paths, text_prompt):
     output, status = main(reference_paths, text_prompt, 0.6, True, True)
-    return output,
+    return output, reference_paths, status
 
 
 # Gradio UI
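On the `gen_example` change: `gr.Examples` hands the function's return values to the components listed under `outputs=`, so the return arity has to match. A minimal standalone sketch of that contract; the component types and the dummy reference clip are assumptions, only the names and the three-output pairing come from this commit.

```python
# Sketch only: gen_example must return one value per component in `outputs`.
# Component types and the dummy reference clip are assumptions.
import numpy as np
import soundfile as sf
import gradio as gr

# Create a dummy reference clip so the example row points at a real file.
sf.write("ref1.wav", np.zeros(24000, dtype="float32"), samplerate=24000)


def gen_example(reference_paths, text_prompt):
    # Placeholder for main(); the real app synthesizes speech here.
    return "ref1.wav", reference_paths, "Generated successfully."


with gr.Blocks() as demo:
    reference_audios = gr.File(file_count="multiple", label="Reference audio")
    text_prompt = gr.Textbox(label="Text prompt")
    synthesized_audio = gr.Audio(label="Synthesized audio")
    status = gr.Textbox(label="Status")

    gr.Examples(
        examples=[[["ref1.wav"], "Hello there."]],
        inputs=[reference_audios, text_prompt],
        outputs=[synthesized_audio, reference_audios, status],
        fn=gen_example,
    )

demo.launch()
```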
@@ -121,7 +122,7 @@ with gr.Blocks() as demo:
     )
 
     gr.Examples(
-        examples=[[eg_voices[0], eg_texts[0]], [eg_voices, eg_texts[1]]],
+        examples=[[[eg_voices[0]], eg_texts[0]], [eg_voices, eg_texts[1]]],
         inputs=[reference_audios, text_prompt],
         outputs=[synthesized_audio, reference_audios, status],
         fn=gen_example,
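On the `examples=` fix: each row supplies one value per input component, and since `reference_audios` accepts multiple files, that value must itself be a list of paths, even for a single clip. A small sketch of the nesting, with made-up paths and prompts standing in for `eg_voices`/`eg_texts`:

```python
# Assumed example data (not from the repository): reference-clip paths and
# prompt strings.
eg_voices = ["voices/alice.wav", "voices/bob.wav"]
eg_texts = ["Hello there.", "Nice to meet you."]

# Outer list = example rows; each row holds one value per input component.
# The file input takes a list of paths, so its cell is itself a list.
examples = [
    [[eg_voices[0]], eg_texts[0]],  # single reference clip, wrapped in a list
    [eg_voices, eg_texts[1]],       # several reference clips, already a list
]
```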