from huggingface_hub import InferenceClient
import gradio as gr

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def translate_text(input_text, target_language):
    # Mixtral-8x7B-Instruct expects its [INST] ... [/INST] instruction format.
    prompt = f"[INST] Translate the following text into {target_language}: {input_text} [/INST]"
    # text_generation returns the generated string directly; the prompt is not
    # echoed back (return_full_text defaults to False), so no stripping is needed.
    translated_text = client.text_generation(prompt, max_new_tokens=512)
    return translated_text.strip()

iface = gr.Interface(
    fn=translate_text,
    inputs=[
        gr.Textbox(label="Text to Translate", placeholder="Enter the text you want to translate here..."),
        gr.Textbox(label="Target Language", placeholder="Enter the target language (e.g., French, Spanish)..."),
    ],
    outputs=gr.Textbox(label="Translated Text"),
    title="Simple Translator with Mixtral",
    description="Translate text to your specified language using the Mixtral model from Hugging Face."
)

iface.launch()
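
# A minimal usage sketch (not part of the Space itself): a direct call to
# translate_text for quick testing, assuming the serverless Inference API is
# reachable and the model is loaded. The sample inputs are arbitrary.
#
# print(translate_text("Good morning, how are you?", "French"))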