Update xtts.py
xtts.py
CHANGED
@@ -1,4 +1,4 @@
-import re, io, os, stat
+import re, io, os, stat, logging
 import tempfile, subprocess
 import requests
 import torch
@@ -12,6 +12,8 @@ from flask import Flask, Blueprint, request, jsonify, send_file
 import torch
 import torchaudio
 
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
 app = Flask(__name__)
 # def upload_bytes(bytes, ext=".wav"):
 #     return bytes
@@ -52,13 +54,13 @@ def predict():
         if tts is None:
             TTS=import_module("TTS.api").TTS
             model_name="tts_models/multilingual/multi-dataset/xtts_v2"
-
+            logging.info(f"loading model {model_name} ...")
             tts = TTS(model_name=model_name, progress_bar=False)
             model=tts.synthesizer.tts_model
             #hack to use cache
             model.__get_conditioning_latents=model.get_conditioning_latents
             model.get_conditioning_latents=get_conditioning_latents
-
+            logging.info("model is ready")
 
         wav = tts.tts(
             text,
@@ -76,7 +78,7 @@ def predict():
         scipy.io.wavfile.write(wav_buffer, tts.synthesizer.output_sample_rate, wav_norm)
         wav_bytes = wav_buffer.getvalue()
         url= upload_bytes(wav_bytes, ext=".wav")
-
+        logging.debug(f'wav is at {url}')
         return url
     except Exception as e:
         traceback.print_exc()
@@ -105,14 +107,14 @@ def get_conditioning_latents(audio_path, **others):
             gpt_cond_latent,
             speaker_embedding,
         ) = torch.load(pt_file)
-
+        logging.debug(f'sample wav info loaded from {pt_file}')
     except:
         (
             gpt_cond_latent,
             speaker_embedding,
         ) = model.__get_conditioning_latents(audio_path=speaker_wav, **others)
         torch.save((gpt_cond_latent,speaker_embedding), pt_file)
-
+        logging.debug(f'sample wav info saved to {pt_file}')
     return gpt_cond_latent,speaker_embedding
 
 def download(url):
@@ -147,5 +149,5 @@ def trim_sample_audio(speaker_wav):
     return speaker_wav
 
 for key, value in os.environ.items():
-
-
+    logging.info(f"{key}: {value}")
+logging.info("xtts is ready")
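
Note on the logging configuration this change introduces: basicConfig is called at import time with level=logging.INFO, so the new logging.info(...) lines (model loading, the environment dump, startup) will appear, while the logging.debug(...) lines (wav URL, latent cache hits) are filtered out by default. A minimal sketch of that behaviour; the LOG_LEVEL environment variable is an illustrative assumption, not part of this commit:

import logging
import os

# Same format string as the commit; the level is read from an (assumed)
# LOG_LEVEL environment variable instead of being hard-coded to INFO.
level_name = os.environ.get("LOG_LEVEL", "INFO").upper()
logging.basicConfig(
    level=getattr(logging, level_name, logging.INFO),
    format='%(asctime)s - %(levelname)s - %(message)s',
)

logging.info("loading model tts_models/multilingual/multi-dataset/xtts_v2 ...")
logging.debug("only visible when LOG_LEVEL=DEBUG")  # suppressed at the default INFO level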
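
The get_conditioning_latents hunk instruments the existing cache hack: speaker latents are loaded from a .pt file when one exists, and recomputed and saved otherwise. A standalone sketch of that pattern, where the cache-file naming and the compute_fn stand-in are assumptions for illustration rather than code from xtts.py:

import logging
import torch

def cached_conditioning_latents(speaker_wav, compute_fn):
    # Assumed naming scheme: keep the cached latents next to the speaker sample.
    pt_file = speaker_wav + ".latents.pt"
    try:
        gpt_cond_latent, speaker_embedding = torch.load(pt_file)
        logging.debug(f"sample wav info loaded from {pt_file}")
    except (FileNotFoundError, EOFError, RuntimeError):
        # Cache miss or unreadable file: recompute and save for next time.
        gpt_cond_latent, speaker_embedding = compute_fn(audio_path=speaker_wav)
        torch.save((gpt_cond_latent, speaker_embedding), pt_file)
        logging.debug(f"sample wav info saved to {pt_file}")
    return gpt_cond_latent, speaker_embedding

Narrowing the exception types, unlike the bare except: in the original file, keeps unrelated failures during torch.load from being silently treated as cache misses.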
|