File size: 1,742 Bytes
3c2fcf4
04e15ae
3c2fcf4
04e15ae
 
3c2fcf4
04e15ae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import gradio as gr
from transformers import AutoModelForCausalLM, pipeline

# Models selectable from the UI dropdown; extend this list to expose more.
models = ["gia-project/gia2-small-untrained", "gpt2"]

# Cache of already-constructed text-generation pipelines, keyed by model name,
# so switching back to a previously used model is instant.
model_pipelines = {}

# Eagerly load one model at import time so the first request has no cold start.
default_model_name = "gia-project/gia2-small-untrained"
model_pipelines[default_model_name] = pipeline(
    "text-generation",
    model=AutoModelForCausalLM.from_pretrained(default_model_name, trust_remote_code=True),
    tokenizer="gpt2",  # all listed models share the gpt2 tokenizer
    trust_remote_code=True,
)

def generate_text(model_name, input_text):
    """Generate a completion for `input_text` with the selected model.

    Pipelines are created lazily on first use and cached in the module-level
    `model_pipelines` dict, so each model is only loaded once per process.
    """
    try:
        generator = model_pipelines[model_name]
    except KeyError:
        # First request for this model: load it and remember the pipeline.
        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
        generator = pipeline("text-generation", model=model, tokenizer="gpt2", trust_remote_code=True)
        model_pipelines[model_name] = generator

    # The pipeline returns a list of candidates; take the first one's text.
    return generator(input_text)[0]['generated_text']

# Define the Gradio interface.
# NOTE: the legacy `gr.inputs.*` / `gr.outputs.*` namespaces were deprecated
# in Gradio 2.x and removed in 3.0+; the top-level component classes
# (gr.Dropdown, gr.Textbox) are the supported API and take the same arguments.
iface = gr.Interface(
    fn=generate_text,  # called as generate_text(model_name, input_text)
    inputs=[
        gr.Dropdown(choices=models, label="Select Model"),  # model picker
        gr.Textbox(lines=5, label="Input Text"),  # prompt entry
    ],
    outputs=gr.Textbox(label="Generated Text"),  # generated completion
)

# Launch the Gradio interface
iface.launch()