Krishna086 committed on
Commit
2c29438
·
verified ·
1 Parent(s): e3b5cc2

Update translation.py

Browse files
Files changed (1) hide show
  1. translation.py +3 -3
translation.py CHANGED
@@ -12,7 +12,7 @@ def _load_default_model():
12
  @st.cache_resource
13
  def load_model(source_lang, target_lang):
14
  try:
15
- if source_lang == target_lang: # Avoid same language error
16
  return _load_default_model()
17
  model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
18
  tokenizer = MarianTokenizer.from_pretrained(model_name)
@@ -22,7 +22,7 @@ def load_model(source_lang, target_lang):
22
  st.warning(f"No direct model for {source_lang} to {target_lang}. Using cached en-fr.")
23
  return _load_default_model()
24
 
25
- @st.cache_data
26
  def translate_cached(text, source_lang, target_lang):
27
  src_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
28
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
@@ -31,7 +31,7 @@ def translate_cached(text, source_lang, target_lang):
31
  tokenizer, model = load_model(src_code, tgt_code)
32
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
33
  with torch.no_grad():
34
- translated = model.generate(**inputs, max_length=500)
35
  return tokenizer.decode(translated[0], skip_special_tokens=True)
36
 
37
  def translate(text, source_lang, target_lang):
 
12
  @st.cache_resource
13
  def load_model(source_lang, target_lang):
14
  try:
15
+ if source_lang == target_lang:
16
  return _load_default_model()
17
  model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
18
  tokenizer = MarianTokenizer.from_pretrained(model_name)
 
22
  st.warning(f"No direct model for {source_lang} to {target_lang}. Using cached en-fr.")
23
  return _load_default_model()
24
 
25
+ @st.cache_data(ttl=3600) # Cache for 1 hour to improve speed
26
  def translate_cached(text, source_lang, target_lang):
27
  src_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
28
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
 
31
  tokenizer, model = load_model(src_code, tgt_code)
32
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
33
  with torch.no_grad():
34
+ translated = model.generate(**inputs, max_length=500, num_beams=2, early_stopping=True) # Beam search for speed
35
  return tokenizer.decode(translated[0], skip_special_tokens=True)
36
 
37
  def translate(text, source_lang, target_lang):