# Hugging Face Space app: Marian MT translation demo (Gradio UI).
import functools

import gradio as gr
from transformers import MarianMTModel, MarianTokenizer
# Assuming the environment is set up for GPU use if available
# This is more about the environment setup than code modification
# Define a function that loads a model and tokenizer based on the chosen language
@functools.lru_cache(maxsize=None)
def load_model(lang_pair):
    """Load (and memoize) the MarianMT model/tokenizer for *lang_pair*.

    Parameters
    ----------
    lang_pair : str
        One of the UI labels: "English to French" or "Kinyarwanda to English".

    Returns
    -------
    tuple
        ``(model, tokenizer)`` — a ``MarianMTModel`` and its ``MarianTokenizer``.

    Raises
    ------
    KeyError
        If *lang_pair* is not one of the known language pairs.

    The ``lru_cache`` keeps each loaded pair alive for the process lifetime;
    the original reloaded the (large) model weights on every translation
    request, which is needlessly slow.
    """
    # Map the dropdown label to the pretrained checkpoint name.
    model_name = {
        "English to French": 'Helsinki-NLP/opus-mt-en-fr',
        "Kinyarwanda to English": 'Helsinki-NLP/opus-mt-rw-en',
    }[lang_pair]
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return model, tokenizer
# Module-level translation cache. Keyed by (model identifier, source text) so
# that identical text translated under different language pairs does not
# collide — the original keyed on text alone, which returned the stale result
# from whichever pair was translated first.
cache = {}
def get_translation_from_cache_or_model(model, tokenizer, text):
    """Translate *text* with *model*/*tokenizer*, memoizing the result.

    Parameters
    ----------
    model : MarianMTModel
        A seq2seq model exposing ``generate`` and (typically) ``name_or_path``.
    tokenizer : MarianTokenizer
        The matching tokenizer; called to encode and ``batch_decode``.
    text : str
        Source-language text to translate.

    Returns
    -------
    str
        The decoded translation (first sequence of the batch).
    """
    # Include the model's identity in the cache key; fall back to "" when the
    # attribute is absent (degrades to the original text-only behavior).
    key = (getattr(model, "name_or_path", ""), text)
    if key in cache:
        return cache[key]
    model_inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    gen = model.generate(**model_inputs)
    translation = tokenizer.batch_decode(gen, skip_special_tokens=True)[0]
    cache[key] = translation
    return translation
def translate(lang_pair, text):
    """Translate *text* for the language pair chosen in the UI dropdown."""
    # Resolve the model/tokenizer for this pair, then answer via the cache.
    model, tokenizer = load_model(lang_pair)
    return get_translation_from_cache_or_model(model, tokenizer, text)
# Build the Gradio UI: a language-pair selector plus a free-text input box,
# wired to the translate() function above.
language_selector = gr.Dropdown(
    choices=["English to French", "Kinyarwanda to English"],
    label="Select Language Pair",
)
source_text = gr.Textbox(lines=2, placeholder="Enter Text...")
translation_box = gr.Textbox(label="Translation")

iface = gr.Interface(
    fn=translate,
    inputs=[language_selector, source_text],
    outputs=translation_box,
)
# Launch the interface (debug logging on; no inline rendering in notebooks).
iface.launch(debug=True, inline=False)