# app.py — IntelliSoc text-generation demo (Streamlit)
# Author: rajrakeshdr — model repo: rajrakeshdr/IntelliSoc
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
# Cache the model and tokenizer
@st.cache_resource
def load_model_and_tokenizer():
    """Load and cache the IntelliSoc causal-LM and its tokenizer.

    Decorated with ``st.cache_resource`` so the expensive download/load
    runs once per process rather than on every Streamlit rerun.

    Returns:
        tuple: ``(model, tokenizer)`` for the ``rajrakeshdr/IntelliSoc`` repo.
    """
    repo_id = "rajrakeshdr/IntelliSoc"
    return (
        AutoModelForCausalLM.from_pretrained(repo_id),
        AutoTokenizer.from_pretrained(repo_id),
    )
# Load the (cached) model and tokenizer once per session.
model, tokenizer = load_model_and_tokenizer()

# Streamlit app title
st.title("IntelliSoc Text Generation")

# Input prompt
prompt = st.text_area("Enter your prompt:", "Once upon a time")

# Generate text on button click
if st.button("Generate Text"):
    # Many causal-LM tokenizers (GPT-style) ship without a pad token;
    # tokenizing with padding=True would raise without one, so fall
    # back to the EOS token — the standard workaround.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Tokenize the prompt into model inputs.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)

    # Generate a continuation of the prompt.
    outputs = model.generate(
        inputs.input_ids,
        # Forward the mask so generate() need not guess which tokens are padding.
        attention_mask=inputs.attention_mask,
        max_length=100,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        # BUG FIX: without do_sample=True generation is greedy and
        # top_k / top_p / temperature are silently ignored.
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
        pad_token_id=tokenizer.pad_token_id,
    )

    # Decode the generated token ids back to text (prompt included).
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Display the generated text
    st.write("Generated Text:")
    st.write(generated_text)