update
Files changed:
- main.py +2 -2
- toolbox/k2_sherpa/nn_models.py +5 -5
main.py
CHANGED
@@ -103,12 +103,12 @@ def process(
     repo_id: Path = Path(repo_id)
     if len(repo_id.parts) == 1:
         repo_name = repo_id.parts[-1]
-        repo_name = repo_name[:
+        repo_name = repo_name[:40]
         folder = repo_name
     elif len(repo_id.parts) == 2:
         repo_supplier = repo_id.parts[-2]
         repo_name = repo_id.parts[-1]
-        repo_name = repo_name[:
+        repo_name = repo_name[:40]
         folder = "{}/{}".format(repo_supplier, repo_name)
     else:
         raise AssertionError("repo_id parts count invalid: {}".format(len(repo_id.parts)))
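For reference, a minimal, self-contained sketch of the logic this hunk changes: the repo id (either a bare name or an `owner/name` pair) is split with pathlib, the repo name is clipped to its first 40 characters, and the result becomes the local folder name. The helper name and the example repo ids below are made up for illustration and are not part of the Space.

from pathlib import Path


def repo_id_to_folder(repo_id: str) -> str:
    # Hypothetical helper mirroring the hunk above: split the repo id,
    # clip the repo name to 40 characters, and build the folder name.
    repo_path = Path(repo_id)
    if len(repo_path.parts) == 1:
        repo_name = repo_path.parts[-1][:40]
        return repo_name
    elif len(repo_path.parts) == 2:
        repo_supplier = repo_path.parts[-2]
        repo_name = repo_path.parts[-1][:40]
        return "{}/{}".format(repo_supplier, repo_name)
    else:
        raise AssertionError("repo_id parts count invalid: {}".format(len(repo_path.parts)))


if __name__ == "__main__":
    # Made-up repo ids, only to show the truncation behaviour.
    print(repo_id_to_folder("some-org/" + "x" * 50))  # -> "some-org/" plus 40 "x" characters
    print(repo_id_to_folder("short-name"))            # -> "short-name" (already under 40 chars)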
toolbox/k2_sherpa/nn_models.py
CHANGED
@@ -241,19 +241,19 @@ def load_recognizer(local_model_dir: Path,
 
     kwargs_ = dict()
     if "nn_model_file" in kwargs.keys():
-        nn_model_file = (local_model_dir / kwargs["nn_model_file"]).as_posix()
+        nn_model_file = (local_model_dir / kwargs["nn_model_file_sub_folder"] / kwargs["nn_model_file"]).as_posix()
         kwargs_["nn_model_file"] = nn_model_file
     if "encoder_model_file" in kwargs.keys():
-        encoder_model_file = (local_model_dir / kwargs["encoder_model_file"]).as_posix()
+        encoder_model_file = (local_model_dir / kwargs["encoder_model_file_sub_folder"] / kwargs["encoder_model_file"]).as_posix()
         kwargs_["encoder_model_file"] = encoder_model_file
     if "decoder_model_file" in kwargs.keys():
-        decoder_model_file = (local_model_dir / kwargs["decoder_model_file"]).as_posix()
+        decoder_model_file = (local_model_dir / kwargs["decoder_model_file_sub_folder"] / kwargs["decoder_model_file"]).as_posix()
         kwargs_["decoder_model_file"] = decoder_model_file
     if "joiner_model_file" in kwargs.keys():
-        joiner_model_file = (local_model_dir / kwargs["joiner_model_file"]).as_posix()
+        joiner_model_file = (local_model_dir / kwargs["joiner_model_file_sub_folder"] / kwargs["joiner_model_file"]).as_posix()
         kwargs_["joiner_model_file"] = joiner_model_file
     if "tokens_file" in kwargs.keys():
-        tokens_file = (local_model_dir / kwargs["tokens_file"]).as_posix()
+        tokens_file = (local_model_dir / kwargs["tokens_file_sub_folder"] / kwargs["tokens_file"]).as_posix()
         kwargs_["tokens_file"] = tokens_file
     if "normalize_samples" in kwargs.keys():
         kwargs_["normalize_samples"] = kwargs["normalize_samples"]
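After this change, each model file in load_recognizer is resolved under a per-file sub-folder: for every "*_file" key present in kwargs, the path becomes local_model_dir / kwargs["<key>_sub_folder"] / kwargs["<key>"]. Below is a hedged sketch of that pattern written as a loop; the helper name and the example file and folder names are assumptions for illustration, not part of the Space.

from pathlib import Path


def build_model_kwargs(local_model_dir: Path, **kwargs) -> dict:
    # Hypothetical helper showing the pattern of the hunk above: each known
    # "*_file" entry is joined with its matching "*_file_sub_folder" entry
    # under local_model_dir before being handed to the recognizer.
    kwargs_ = dict()
    for key in ("nn_model_file", "encoder_model_file", "decoder_model_file",
                "joiner_model_file", "tokens_file"):
        if key in kwargs:
            sub_folder = kwargs[key + "_sub_folder"]
            kwargs_[key] = (local_model_dir / sub_folder / kwargs[key]).as_posix()
    if "normalize_samples" in kwargs:
        kwargs_["normalize_samples"] = kwargs["normalize_samples"]
    return kwargs_


# Made-up model layout, only to show the resulting paths.
example = build_model_kwargs(
    Path("/data/models/example-model"),
    encoder_model_file="encoder.onnx", encoder_model_file_sub_folder="exp",
    decoder_model_file="decoder.onnx", decoder_model_file_sub_folder="exp",
    joiner_model_file="joiner.onnx", joiner_model_file_sub_folder="exp",
    tokens_file="tokens.txt", tokens_file_sub_folder="data/lang_char",
)
print(example["encoder_model_file"])  # /data/models/example-model/exp/encoder.onnx
print(example["tokens_file"])         # /data/models/example-model/data/lang_char/tokens.txt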