Runtime error
Primer_commit
- app.py +53 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+import torchaudio
+import torch
+from fairseq import checkpoint_utils
+import numpy as np
+import tempfile
+import os
+
+# Check whether CUDA is available
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+print(f"Using {device} for voice cloning")
+
+# Load the model onto the GPU if available
+models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(["https://dl.fbaipublicfiles.com/vits/model.pt"])
+model = models[0].to(device)
+model.eval()
+
+def clone_voice(reference_audio, text):
+    # Load the reference audio as a tensor
+    waveform, sample_rate = torchaudio.load(reference_audio.name)
+
+    # Normalize the reference audio
+    waveform = waveform.mean(dim=0)  # Downmix to mono
+    waveform = torchaudio.transforms.Resample(sample_rate, 22050)(waveform)  # Ensure 22.05 kHz
+
+    # Move the audio tensor to the GPU if available
+    waveform = waveform.unsqueeze(0).to(device)
+
+    # Extract the speaker's voice embedding
+    speaker_embedding = model.get_speaker_embedding(waveform)
+
+    # Generate the cloned voice
+    synthesized_waveform = model.synthesize(text, speaker_embedding)
+
+    # Move the generated audio to the CPU to save it
+    synthesized_waveform = synthesized_waveform.cpu()
+
+    # Save the generated audio to a temporary file
+    output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+    torchaudio.save(output_file.name, synthesized_waveform, 22050)
+
+    return output_file.name
+
+# Create the Gradio interface
+interface = gr.Interface(
+    fn=clone_voice,
+    inputs=[gr.Audio(type="file"), gr.Textbox(label="Text to synthesize")],
+    outputs=gr.Audio(label="Cloned Voice"),
+    title="Voice Cloning with GPU",
+    description="Upload a reference audio clip and enter text to clone the voice, with GPU acceleration (if available)."
+)
+
+interface.launch()
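A note on the model-loading call above: checkpoint_utils.load_model_ensemble_and_task is normally given local file paths rather than URLs, so a more robust pattern is to download the checkpoint first and then load it from disk. The following is only a minimal sketch of that pattern, reusing the URL from app.py with an arbitrary local filename; it does not verify that a checkpoint is actually published at that address.

import os
import torch
from fairseq import checkpoint_utils

CKPT_URL = "https://dl.fbaipublicfiles.com/vits/model.pt"  # URL taken from app.py above
CKPT_PATH = "model.pt"                                     # assumed local filename

# Fetch the checkpoint once, then hand fairseq a local path.
if not os.path.exists(CKPT_PATH):
    torch.hub.download_url_to_file(CKPT_URL, CKPT_PATH)

models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([CKPT_PATH])
model = models[0].eval()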
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio
+torchaudio
+fairseq
+numpy
+torch
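The reference-audio preprocessing that clone_voice performs (mono downmix, then resampling to 22.05 kHz) can be exercised on its own with just the torchaudio dependency listed above. This is a minimal sketch under the same assumptions as app.py; the input and output file names are placeholders.

import torch
import torchaudio

TARGET_SR = 22050  # sample rate the app resamples to

def preprocess_reference(path: str) -> torch.Tensor:
    waveform, sample_rate = torchaudio.load(path)   # (channels, samples)
    waveform = waveform.mean(dim=0)                 # downmix to mono -> (samples,)
    if sample_rate != TARGET_SR:
        waveform = torchaudio.transforms.Resample(sample_rate, TARGET_SR)(waveform)
    return waveform.unsqueeze(0)                    # (1, samples), the shape clone_voice feeds the model

if __name__ == "__main__":
    ref = preprocess_reference("reference.wav")     # placeholder input file
    torchaudio.save("reference_22k.wav", ref, TARGET_SR)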