import torch
import gradio as gr

# Load a GPT-2 model and tokenizer that were previously pickled with torch.save.
# (On recent PyTorch releases, torch.load may need weights_only=False to
# unpickle full objects rather than bare state dicts.)
model = torch.load("GPT2Model.pt")
tokenizer = torch.load("GPT2Tokenizer.pt")

def gpt2(prompt):
    # Tokenize the prompt and generate greedily up to a total length of 30 tokens.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    outputs = model.generate(input_ids, do_sample=False, max_length=30)
    # Decode the generated ids back into text.
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

# Wrap the generation function in a simple text-in / text-out Gradio interface.
demo = gr.Interface(fn=gpt2, inputs="text", outputs="text")
demo.launch()
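
The snippet above assumes GPT2Model.pt and GPT2Tokenizer.pt already exist on disk as fully pickled objects. As a rough sketch of how such files could be produced (this assumes the Hugging Face transformers library and the "gpt2" checkpoint, neither of which appears in the original snippet):

# Sketch only: one way GPT2Model.pt / GPT2Tokenizer.pt might have been created.
# GPT2LMHeadModel, GPT2Tokenizer, and the "gpt2" checkpoint name are assumptions,
# not part of the original example.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# torch.save pickles the full objects, so torch.load can restore them directly.
torch.save(model, "GPT2Model.pt")
torch.save(tokenizer, "GPT2Tokenizer.pt")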