text-completion / app.py
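
"""Gradio demo: text completion with a user-selectable causal language model."""
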
import gradio as gr
from transformers import AutoModelForCausalLM, pipeline

# Define the list of model names
models = ["gia-project/gia2-small-untrained", "gpt2"]  # Add more model names as needed

# Dictionary to store loaded models and their pipelines
model_pipelines = {}

# Load a default model initially
default_model_name = "gia-project/gia2-small-untrained"
default_model = AutoModelForCausalLM.from_pretrained(default_model_name, trust_remote_code=True)
default_generator = pipeline("text-generation", model=default_model, tokenizer="gpt2", trust_remote_code=True)
model_pipelines[default_model_name] = default_generator


def generate_text(model_name, input_text):
    # Check if the selected model is already loaded
    if model_name not in model_pipelines:
        # Load the model and create a pipeline if it's not already loaded
        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
        generator = pipeline("text-generation", model=model, tokenizer="gpt2", trust_remote_code=True)
        model_pipelines[model_name] = generator

    # Get the pipeline for the selected model and generate text
    generator = model_pipelines[model_name]
    generated_text = generator(input_text)[0]['generated_text']
    return generated_text


# Define the Gradio interface
iface = gr.Interface(
    fn=generate_text,  # Function to be called on user input
    inputs=[
        gr.Dropdown(choices=models, label="Select Model"),  # Dropdown to select model
        gr.Textbox(lines=5, label="Input Text"),  # Textbox for entering text
    ],
    outputs=gr.Textbox(label="Generated Text"),  # Textbox to display the generated text
)

# Launch the Gradio interface
iface.launch()