# NOTE: the following lines are Hugging Face blob-page artifacts captured when
# this file was scraped; kept as comments so the module remains valid Python.
# MisterAI's picture
# Create app.py
# fd43011 verified
# raw | history blame | 2.17 kB
#V01
from functools import lru_cache

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Hugging Face Hub identifiers of the models selectable in the UI dropdown.
model_list: list[str] = [
"fbaldassarri/tiiuae_Falcon3-1B-Instruct-autogptq-int8-gs128-asym",
"MisterAI/jpacifico_Chocolatine-3B-Instruct-DPO-v1.2",
# Add more model identifiers here
]
@lru_cache(maxsize=2)
def load_model(model_name):
    """Load (and cache) the model and tokenizer for *model_name*.

    Without caching, every generation request re-downloaded/re-instantiated
    the full model from the Hub; ``lru_cache`` keeps the two most recently
    used (model, tokenizer) pairs in memory.

    Args:
        model_name: Hugging Face Hub identifier of a causal-LM checkpoint.

    Returns:
        A ``(model, tokenizer)`` tuple.
    """
    # SECURITY: trust_remote_code=True executes arbitrary Python shipped with
    # the checkpoint — only list vetted repositories in ``model_list``.
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    return model, tokenizer
def generate_text(model, tokenizer, input_text, max_length, temperature):
    """Generate a continuation of *input_text* with the given model.

    Args:
        model: A causal language model exposing ``generate``.
        tokenizer: The matching tokenizer.
        input_text: Prompt string.
        max_length: Total sequence length cap (prompt tokens included).
        temperature: Sampling temperature in (0, 1].

    Returns:
        The decoded output string, special tokens stripped.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_length=max_length,
            # generate() defaults to greedy decoding, which silently ignores
            # temperature; do_sample=True makes the slider actually effective.
            do_sample=True,
            temperature=temperature,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
def main(model_name, input_text, max_length, temperature):
    """Resolve the selected model and return generated text for the prompt."""
    model, tokenizer = load_model(model_name)
    return generate_text(model, tokenizer, input_text, max_length, temperature)
def _preload_model(model_name):
    """Warm the model cache; returns None so Gradio has nothing to route.

    ``load_model`` returns a ``(model, tokenizer)`` tuple — wiring it directly
    to a click handler with ``outputs=None`` makes Gradio complain about a
    return-value/outputs count mismatch, so we discard the result here.
    """
    load_model(model_name)


demo = gr.Blocks()
with demo:
    gr.Markdown("# Try It")
    with gr.Row():
        model_select = gr.Dropdown(model_list, label="Sélectionner un modèle")
        load_button = gr.Button("Charger le modèle")
    with gr.Row():
        input_text = gr.Textbox(label="Texte d'entrée")
        max_length_slider = gr.Slider(50, 500, label="Longueur maximale", value=200)
        temperature_slider = gr.Slider(0.1, 1.0, label="Température", value=0.7)
    submit_button = gr.Button("Soumettre")
    output_text = gr.Textbox(label="Texte généré")
    # NOTE(review): this component is never written to by any handler —
    # kept for interface compatibility, but it stays empty at runtime.
    history = gr.JSON(label="Historique")

    # Optional warm-up: pre-load the selected model into the cache.
    load_button.click(
        _preload_model,
        inputs=model_select,
        outputs=None,
        queue=False,
    )
    submit_button.click(
        main,
        inputs=[model_select, input_text, max_length_slider, temperature_slider],
        outputs=output_text,
        queue=False,
    )

if __name__ == "__main__":
    demo.launch()