# Hugging Face Space: simple text translator built on Mixtral-8x7B-Instruct.
from huggingface_hub import InferenceClient
import gradio as gr

# Shared serverless-inference client for the Mixtral instruct model.
# Created once at module level so every translation request reuses it.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
def translate_text(input_text, target_language):
    """Translate *input_text* into *target_language* via the Mixtral model.

    Parameters
    ----------
    input_text : str
        Text to translate.
    target_language : str
        Human-readable language name (e.g. "French", "Spanish").

    Returns
    -------
    str
        The model's translation, with any echoed prompt and surrounding
        whitespace stripped.
    """
    prompt = f"Translate the following text into {target_language}: {input_text}"
    # InferenceClient.text_generation returns the generated text as a plain
    # string and accepts no `options=` kwarg (the old
    # `response[0]['generated_text']` / `{"wait_for_model": True}` shapes
    # belong to the raw HTTP Inference API and would crash here). The client
    # already waits/retries while the model loads.
    response = client.text_generation(prompt)
    # Some models echo the prompt before the completion; strip it only when
    # it is actually present so we never truncate a clean translation.
    if response.startswith(prompt):
        response = response[len(prompt):]
    return response.strip()
# Two free-text inputs (source text + target language) mapped straight onto
# translate_text's positional parameters; one textbox for the result.
iface = gr.Interface(
    fn=translate_text,
    inputs=[
        gr.Textbox(label="Text to Translate", placeholder="Enter the text you want to translate here..."),
        gr.Textbox(label="Target Language", placeholder="Enter the target language (e.g., French, Spanish)..."),
    ],
    outputs=gr.Textbox(label="Translated Text"),
    title="Simple Translator with Mixtral",
    description="Translate text to your specified language using the Mixtral model from Hugging Face.",
)

# Start the Gradio server (blocking call; serves the UI for this Space).
iface.launch()