File size: 2,030 Bytes
76b06b6
b912ba6
122790b
b912ba6
a6fc19e
 
 
 
 
 
e22e364
 
dd34156
122790b
 
 
b912ba6
 
dc03f7a
b912ba6
2c29438
111af19
0b60e87
dc03f7a
0b60e87
 
 
 
 
 
 
 
 
 
 
dd34156
02a14aa
b912ba6
122790b
 
dd34156
dc03f7a
dd34156
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import streamlit as st
from transformers import MarianTokenizer, MarianMTModel
import torch

# Supported UI languages: ISO 639-1 code -> (native-script name, English name).
# The native name is what the visible code stores first; presumably it is shown
# in the UI picker while the English name labels it — TODO confirm against the
# Streamlit page that consumes this dict.
LANGUAGES = {
    "en": ("English", "English"), "fr": ("Français", "French"), "es": ("Español", "Spanish"),
    "de": ("Deutsch", "German"), "hi": ("हिन्दी", "Hindi"), "zh": ("中文", "Chinese"),
    "ar": ("العربية", "Arabic"), "ru": ("Русский", "Russian"), "ja": ("日本語", "Japanese")
}

@st.cache_resource
def _load_default_model():
    """Load and cache the fallback MarianMT pair (English -> Hindi).

    Returns:
        tuple: (MarianTokenizer, MarianMTModel) for "Helsinki-NLP/opus-mt-en-hi".
    """
    fallback_name = "Helsinki-NLP/opus-mt-en-hi"
    # st.cache_resource ensures the weights are fetched once per process.
    return (
        MarianTokenizer.from_pretrained(fallback_name),
        MarianMTModel.from_pretrained(fallback_name),
    )

@st.cache_resource
def load_model(source_lang, target_lang):
    """Load and cache a MarianMT (tokenizer, model) pair for a language pair.

    Tries the direct Helsinki-NLP/opus-mt-{src}-{tgt} checkpoint first; on any
    failure falls back to the en->target model (for non-English pairs) or to
    the default en->hi model. Never raises: callers always get a usable pair.

    Args:
        source_lang: ISO 639-1 source language code (e.g. "en").
        target_lang: ISO 639-1 target language code (e.g. "hi").

    Returns:
        tuple: (MarianTokenizer, MarianMTModel).
    """
    try:
        if source_lang == target_lang:
            # No identity checkpoints exist; reuse the cached default pair.
            return _load_default_model()
        model_name = f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}"
        try:
            tokenizer = MarianTokenizer.from_pretrained(model_name)
            model = MarianMTModel.from_pretrained(model_name)
            return tokenizer, model
        except Exception:
            if source_lang != "en" and target_lang != "en":
                # BUG FIX: the original also loaded the source->en model, but a
                # dead condition (`source_lang == "en"` inside this branch,
                # where it is guaranteed false) meant only the en->target pair
                # was ever returned. Keep that effective behavior and drop the
                # wasted model download.
                # NOTE(review): this is NOT a true pivot translation — the
                # en->target model will receive source-language text. A real
                # pivot needs translate() to chain source->en then en->target;
                # that requires an interface change, so it is only flagged here.
                return load_model("en", target_lang)
            return _load_default_model()
    except Exception:
        # Best-effort by design: any load failure degrades to the default pair.
        return _load_default_model()

def translate(text, source_lang, target_lang):
    """Translate *text* from source_lang to target_lang, best-effort.

    Returns "" for empty/falsy input, and the original text unchanged if
    anything fails during model loading or generation (deliberate fallback
    so the UI never crashes on a translation error).
    """
    if not text:
        return ""
    try:
        tokenizer, model = load_model(source_lang, target_lang)
        encoded = tokenizer(
            text, return_tensors="pt", padding=True, truncation=True, max_length=500
        )
        # Inference only — no gradients needed.
        with torch.no_grad():
            output_ids = model.generate(
                **encoded, max_length=500, num_beams=2, early_stopping=True
            )
        result = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        return result
    except Exception:
        # Best-effort: hand the caller back the untranslated input.
        return text