Delete README.md

README.md (DELETED, 92 lines removed):
---
license: cc-by-nc-4.0
library_name: fairseq
task: audio-to-audio
tags:
- fairseq
- audio
- audio-to-audio
- speech-to-speech-translation

datasets:
- Must-C
- TAT
- Hokkien dramas

---
## xm_transformer_s2ut_hk-en

Speech-to-speech translation model with a single-pass decoder (S2UT) from fairseq:
- Hokkien-English
- Trained on supervised data in the TED, drama, and [TAT](https://sites.google.com/speech.ntut.edu.tw/fsw/home/tat-corpus) domains, and on weakly supervised data in the drama domain. See [here](https://research.facebook.com/publications/hokkien-direct-speech-to-speech-translation) for training details.
- Speech synthesis with [facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur](https://huggingface.co/facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur)
- [Project Page](https://github.com/facebookresearch/fairseq/tree/ust/examples/hokkien)

## Usage
```python
import json
import os
from pathlib import Path

import IPython.display as ipd
from fairseq import hub_utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.speech_to_text.hub_interface import S2THubInterface
from fairseq.models.text_to_speech import CodeHiFiGANVocoder
from fairseq.models.text_to_speech.hub_interface import VocoderHubInterface

from huggingface_hub import snapshot_download
import torchaudio

cache_dir = os.getenv("HUGGINGFACE_HUB_CACHE")

# load the Hokkien-English speech-to-unit translation model from the Hub
models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
    "facebook/xm_transformer_s2ut_hk-en",
    arg_overrides={"config_yaml": "config.yaml", "task": "speech_to_text"},
    cache_dir=cache_dir,
)
# run inference on CPU
model = models[0].cpu()
cfg["task"].cpu = True
generator = task.build_generator([model], cfg)

# requires 16 kHz, mono-channel audio
audio, _ = torchaudio.load("/path/to/an/audio/file")
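# Not part of the original card: if your clip is not already 16 kHz mono,
# resample and downmix it first (same placeholder path as above).
orig_freq = torchaudio.info("/path/to/an/audio/file").sample_rate
if orig_freq != 16000:
    audio = torchaudio.functional.resample(audio, orig_freq=orig_freq, new_freq=16000)
if audio.shape[0] > 1:
    audio = audio.mean(dim=0, keepdim=True)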

# translate the Hokkien speech into discrete units of English speech
sample = S2THubInterface.get_model_input(task, audio)
unit = S2THubInterface.get_prediction(task, model, generator, sample)

# speech synthesis: download the unit-based HiFi-GAN vocoder
library_name = "fairseq"
cache_dir = (
    cache_dir or (Path.home() / ".cache" / library_name).as_posix()
)
cache_dir = snapshot_download(
    "facebook/unit_hifigan_mhubert_vp_en_es_fr_it3_400k_layer11_km1000_lj_dur",
    cache_dir=cache_dir,
    library_name=library_name,
)

x = hub_utils.from_pretrained(
    cache_dir,
    "model.pt",
    ".",
    archive_map=CodeHiFiGANVocoder.hub_models(),
    config_yaml="config.json",
    fp16=False,
    is_vocoder=True,
)

with open(f"{x['args']['data']}/config.json") as f:
    vocoder_cfg = json.load(f)
assert (
    len(x["args"]["model_path"]) == 1
), "Too many vocoder models in the input"

# build the vocoder that turns the predicted units into a waveform
vocoder = CodeHiFiGANVocoder(x["args"]["model_path"][0], vocoder_cfg)
tts_model = VocoderHubInterface(vocoder_cfg, vocoder)

tts_sample = tts_model.get_model_input(unit)
wav, sr = tts_model.get_prediction(tts_sample)

# play the synthesized English speech in a notebook
ipd.Audio(wav, rate=sr)
```
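
The final `ipd.Audio` call plays the result in a notebook. As a small addition that is not part of the original card, the synthesized English speech can also be written to disk with `torchaudio`, reusing `wav` and `sr` from the snippet above:

```python
import torchaudio

# `wav` and `sr` come from tts_model.get_prediction above; torchaudio.save
# expects a 2-D [channels, frames] tensor, so add a channel dimension if
# needed. The output filename is arbitrary.
out = wav.detach().cpu()
if out.dim() == 1:
    out = out.unsqueeze(0)
torchaudio.save("hk_en_translation.wav", out, sr)
```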