Update translation.py
translation.py  CHANGED  (+8 -2)
@@ -67,7 +67,13 @@ def load_model(source_lang, target_lang):
     tokenizer_model_pair = all_models.get(model_key)
     if tokenizer_model_pair and tokenizer_model_pair[0] and tokenizer_model_pair[1]:
         return tokenizer_model_pair
-    #
+    # Prefer direct model if available, then pivot
+    for src in [source_lang, "en"]:
+        for tgt in [target_lang, "en"]:
+            if src != tgt:
+                pair = all_models.get((src, tgt))
+                if pair and pair[0] and pair[1]:
+                    return pair
     default_tokenizer, default_model = _load_default_model()
     return default_tokenizer, CombinedModel(source_lang, target_lang, default_tokenizer, default_model)

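The loop added above tries the direct (source, target) pair first and then the two English-pivot legs. A minimal standalone sketch of that candidate order, assuming all_models is a dict keyed by (source, target) language-code tuples; the lookup_order helper and the tiny registry here are hypothetical illustrations, not part of the module:

    # Hypothetical registry: direct fr->en and en->hi models exist, fr->hi does not.
    all_models = {
        ("fr", "en"): ("fr-en-tokenizer", "fr-en-model"),
        ("en", "hi"): ("en-hi-tokenizer", "en-hi-model"),
    }

    def lookup_order(source_lang, target_lang):
        # Same nesting as the patch: direct pair, then source->en, then en->target.
        for src in [source_lang, "en"]:
            for tgt in [target_lang, "en"]:
                if src != tgt:
                    yield (src, tgt)

    for key in lookup_order("fr", "hi"):
        pair = all_models.get(key)
        if pair and pair[0] and pair[1]:
            print("first usable pair:", key)  # ('fr', 'en'): the first pivot leg
            break

For fr -> hi the candidates come out as (fr, hi), (fr, en), (en, hi); when no direct model exists the loop returns the first usable leg, and the CombinedModel fallback on the lines that follow covers pairs no loaded model handles.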
@@ -88,7 +94,7 @@ def translate(text, source_lang, target_lang):
         tokenizer, model = load_model(source_lang, target_lang)
         inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
         with torch.no_grad():
-            translated = model.generate(**inputs, max_length=1000 if target_lang == "hi" else 500, num_beams=
+            translated = model.generate(**inputs, max_length=1000 if target_lang == "hi" else 500, num_beams=4, early_stopping=True)  # Reduced to 4 beams for speed
         result = tokenizer.decode(translated[0], skip_special_tokens=True)
         return result if result.strip() else text
     except Exception as e:
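The reworked call uses the standard transformers generate() beam-search parameters. A minimal standalone sketch of the updated call, assuming a MarianMT-style seq2seq checkpoint; the Helsinki-NLP/opus-mt-en-hi name is an example, not necessarily what load_model() returns:

    import torch
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-hi")
    model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-hi")

    inputs = tokenizer("How are you?", return_tensors="pt",
                       padding=True, truncation=True, max_length=500)
    with torch.no_grad():
        # num_beams=4 keeps four candidate hypotheses per decoding step, and
        # early_stopping=True ends the search once enough beams have finished,
        # which is the speed/quality trade the inline comment refers to.
        translated = model.generate(**inputs, max_length=1000,
                                    num_beams=4, early_stopping=True)
    print(tokenizer.decode(translated[0], skip_special_tokens=True))

Beam-search cost grows roughly linearly with the beam count, so fewer beams means proportionally less decoding work; 4 is a common middle ground for machine translation, and early_stopping keeps latency bounded on short inputs.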