import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import os
# Custom CSS styling for a light, elegant design
st.markdown("""
""", unsafe_allow_html=True)
# Load model and tokenizer
model_path = "./saved_model"
tokenizer_path = "./saved_tokenizer"  # Directory where the saved tokenizer lives
try:
    # First run: build the tokenizer from the t5-small base, register any
    # custom tokens, and save it locally for reuse on later runs.
    if not os.path.exists(tokenizer_path):
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        tokenizer.add_tokens([''])  # Placeholder for project-specific custom tokens
        tokenizer.save_pretrained(tokenizer_path)
    else:
        tokenizer = T5Tokenizer.from_pretrained(tokenizer_path, local_files_only=True)

    model = T5ForConditionalGeneration.from_pretrained(
        model_path, local_files_only=True, ignore_mismatched_sizes=True
    )
    # Keep the embedding matrix in sync with the (possibly extended)
    # tokenizer; this is a no-op when the sizes already match.
    model.resize_token_embeddings(len(tokenizer))
    device = torch.device("cpu")
    model.to(device)
    model_loaded = True
except Exception as e:
    st.error(f"Error loading model: {e}")
    model_loaded = False
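
# Optional sketch, not wired into the app: Streamlit re-executes this script
# on every interaction, so the model above is reloaded on each rerun. In
# recent Streamlit versions, st.cache_resource keeps loaded objects in memory
# across reruns. The helper below reuses model_path / tokenizer_path defined
# above and is an assumption, not part of the original code.
@st.cache_resource
def load_model_cached():
    tok = T5Tokenizer.from_pretrained(tokenizer_path, local_files_only=True)
    mdl = T5ForConditionalGeneration.from_pretrained(model_path, local_files_only=True)
    return tok, mdl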
def generate_summary(text):
    """Summarize the given text with the locally saved T5 model."""
    try:
        # T5 expects a task prefix; inputs beyond max_length are truncated.
        inputs = tokenizer(
            "summarize: " + text,
            max_length=1024,
            truncation=True,
            return_tensors="pt",
        ).to(device)
        outputs = model.generate(
            inputs.input_ids,
            max_length=150,
            length_penalty=2.0,  # mildly favor longer summaries
            num_beams=4,         # beam search for more coherent output
            early_stopping=True,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        return None
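
# Hedged sketch (an assumption, not in the original app): inputs longer than
# the 1024-token limit above are silently truncated, so long documents lose
# their tail. One simple workaround is to split the text into fixed-size
# character chunks, summarize each chunk, and join the partial summaries.
def summarize_long_text(text, chunk_chars=2000):
    chunks = [text[i:i + chunk_chars] for i in range(0, len(text), chunk_chars)]
    parts = [generate_summary(chunk) for chunk in chunks]
    return " ".join(p for p in parts if p)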
st.title("🧠 Smart Text Summarizer")
st.markdown("""
""", unsafe_allow_html=True)
text = st.text_area("Enter the text you want to summarize...", height=200)
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    if st.button("🔍 Generate Summary"):
        if text and model_loaded:
            with st.spinner("Generating summary..."):
                summary = generate_summary(text)
            if summary:
                st.markdown(summary)  # Display the generated summary
            else:
                st.error("❌ Failed to generate summary. Please check your input.")
        elif not model_loaded:
            st.error("❌ Failed to load model. Please check the application logs.")
        else:
            st.warning("⚠️ Please enter text to summarize.")
st.markdown("""
""", unsafe_allow_html=True)