# IngenuityPrompt / app.py
# Source: JothishJJ's Hugging Face Space (commit c38ae0b, verified)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the DistilGPT-2 model and its tokenizer once at import time, so every
# request served by the Gradio app reuses the same in-memory model.
MODEL_NAME = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# Running transcript of the dialogue ("User: ...\nBot: ...\n" lines).
# NOTE(review): this is module-level state shared by ALL visitors to the app —
# concurrent users will see each other's messages; per-session state
# (gr.State) would isolate conversations.
conversation_history = ""
def chat(user_input):
    """Append *user_input* to the running transcript and return the model's reply.

    Parameters:
        user_input: the user's message for this turn (plain text).

    Returns:
        The bot's newly generated reply (str), with surrounding whitespace
        stripped. The reply is also appended to the module-level transcript.

    NOTE(review): the transcript is a module-level global shared across all
    sessions — see the comment at its definition.
    """
    global conversation_history
    conversation_history += f"User: {user_input}\n"
    # Tokenize the whole transcript; truncate so the prompt stays inside
    # DistilGPT-2's 1024-position context window.
    inputs = tokenizer(
        conversation_history, return_tensors="pt", truncation=True, max_length=1000
    )
    # Use max_new_tokens rather than max_length: max_length counts the prompt
    # too, so once the accumulated history passed 200 tokens the original call
    # could generate nothing at all. pad_token_id=eos silences the warning for
    # GPT-2-family models, which define no pad token.
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # outputs[0] contains prompt + continuation. Decode ONLY the continuation;
    # the original decoded the full sequence, echoing the entire conversation
    # back as the "response" and re-appending that echo to the history each turn.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
    conversation_history += f"Bot: {response}\n"
    return response
# Gradio interface: a single textbox in, plain text out, wired to chat().
# Each submission is one conversational turn; history lives in the module
# global rather than in the UI component.
interface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(placeholder="Type your message here..."),
    outputs="text",
    title="GPT-2 Chatbot",
    description="A simple chatbot powered by GPT-2"
)
# Launch the web UI only when run as a script (Spaces runs this module directly).
if __name__ == "__main__":
    interface.launch()