dangtr0408 committed
Commit 75aba2d · 1 Parent(s): 2761ffc

minor change

Files changed (1): app.py (+4 -3)
app.py CHANGED
@@ -4,7 +4,7 @@ import os
 import sys
 import soundfile as sf
 import numpy as np
-import torch.cuda
+import torch

 repo_url = "https://huggingface.co/dangtr0408/StyleTTS2-lite-vi"
 repo_dir = "StyleTTS2-lite-vi"
@@ -20,7 +20,7 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu'

 config_path = os.path.join(repo_dir, "Models", "config.yml")
 models_path = os.path.join(repo_dir, "Models", "model.pth")
-model = StyleTTS2(config_path, models_path).to(device)
+model = StyleTTS2(config_path, models_path).eval().to(device)

 # Core inference function
 def process_inputs(text_prompt, reference_audio_paths,
@@ -35,7 +35,8 @@ def process_inputs(text_prompt, reference_audio_paths,
         "speed": 1.1
     }

-    r = model.generate(text_prompt, speakers, avg_style, stabilize, denoise, n_merge, "[id_1]")
+    with torch.no_grad():
+        r = model.generate(text_prompt, speakers, avg_style, stabilize, denoise, n_merge, "[id_1]")

     r = r / np.abs(r).max()
     sf.write("output.wav", r, samplerate=24000)