Krishna086 commited on
Commit
dc03f7a
·
verified ·
1 Parent(s): df996a9

Update translation.py

Browse files
Files changed (1) hide show
  1. translation.py +11 -7
translation.py CHANGED
@@ -4,22 +4,22 @@ import torch
4
 
5
  @st.cache_resource
6
  def _load_default_model():
7
- model_name = "Helsinki-NLP/opus-mt-en-fr" # Default model
8
  tokenizer = MarianTokenizer.from_pretrained(model_name)
9
  model = MarianMTModel.from_pretrained(model_name)
10
  return tokenizer, model
11
 
12
  @st.cache_resource
13
- def load_model(src_lang, tgt_lang):
14
  try:
15
- if src_lang == tgt_lang: # Handle same language case
16
  return _load_default_model()
17
- model_name = f"Helsinki-NLP/opus-mt-{src_lang}-{tgt_lang}"
18
  tokenizer = MarianTokenizer.from_pretrained(model_name)
19
  model = MarianMTModel.from_pretrained(model_name)
20
  return tokenizer, model
21
  except Exception as e:
22
- st.warning(f"No direct model for {src_lang} to {tgt_lang}. Using cached en-fr. Error suppressed.")
23
  return _load_default_model()
24
 
25
  @st.cache_data
@@ -28,7 +28,7 @@ def translate_cached(text, source_lang, target_lang):
28
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
29
  tgt_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
30
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(target_lang, "fr")
31
- tokenizer, model = load_model(src_code, tgt_lang if src_lang != tgt_lang else "fr") # Avoid en-en error
32
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
33
  with torch.no_grad():
34
  translated = model.generate(**inputs, max_length=500)
@@ -37,7 +37,11 @@ def translate_cached(text, source_lang, target_lang):
37
  def translate(text, source_lang, target_lang):
38
  if not text:
39
  return "No text provided."
40
- return translate_cached(text, source_lang, target_lang)
 
 
 
 
41
 
42
  LANGUAGES = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
43
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}
 
4
 
5
@st.cache_resource
def _load_default_model():
    """Load and cache the fallback English->French MarianMT pair.

    Returns:
        tuple: (MarianTokenizer, MarianMTModel) for Helsinki-NLP/opus-mt-en-fr.
    """
    default_name = "Helsinki-NLP/opus-mt-en-fr"
    default_tokenizer = MarianTokenizer.from_pretrained(default_name)
    default_model = MarianMTModel.from_pretrained(default_name)
    return default_tokenizer, default_model
11
 
12
@st.cache_resource
def load_model(source_lang, target_lang):
    """Load and cache a MarianMT tokenizer/model pair for a language pair.

    Args:
        source_lang: ISO 639-1 code of the source language (e.g. "en").
        target_lang: ISO 639-1 code of the target language (e.g. "fr").

    Returns:
        tuple: (tokenizer, model). Falls back to the cached en-fr pair when
        the languages are identical or no direct Helsinki-NLP model exists.
    """
    # No opus-mt model exists for identical source/target; reuse the default.
    if source_lang == target_lang:
        return _load_default_model()
    model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return tokenizer, model
    except Exception as e:  # Hub lookup/download fails for unsupported pairs.
        # Fix: the caught exception was previously bound but never reported;
        # include it so users can tell a missing model from a network error.
        st.warning(f"No direct model for {source_lang} to {target_lang}. Using cached en-fr. ({e})")
        return _load_default_model()
24
 
25
  @st.cache_data
 
28
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(source_lang, "en")
29
  tgt_code = {"English": "en", "French": "fr", "Spanish": "es", "German": "de",
30
  "Hindi": "hi", "Chinese": "zh", "Arabic": "ar", "Russian": "ru", "Japanese": "ja"}.get(target_lang, "fr")
31
+ tokenizer, model = load_model(src_code, tgt_code)
32
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=500)
33
  with torch.no_grad():
34
  translated = model.generate(**inputs, max_length=500)
 
37
def translate(text, source_lang, target_lang):
    """Translate text between languages, falling back to the input on error.

    Args:
        text: Text to translate.
        source_lang: Source language display name (e.g. "English").
        target_lang: Target language display name (e.g. "French").

    Returns:
        str: The translated text, "No text provided." for empty input, or
        the original text when translation fails.
    """
    # Fix: also reject whitespace-only input, which would otherwise be sent
    # through the model and waste a (cached) translation call.
    if not text or not text.strip():
        return "No text provided."
    try:
        return translate_cached(text, source_lang, target_lang)
    except Exception as e:
        # Best-effort UX: report the failure but never crash the app.
        st.error(f"Translation error: {str(e)}. Using input as fallback.")
        return text
45
 
46
# Display-name -> ISO 639-1 code map backing the UI language selectors.
LANGUAGES = dict(
    English="en",
    French="fr",
    Spanish="es",
    German="de",
    Hindi="hi",
    Chinese="zh",
    Arabic="ar",
    Russian="ru",
    Japanese="ja",
)