import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import os
# Custom CSS styling for a light, elegant design
st.markdown("""
<style>
.main {
background-color: #FAFAFA;
background-image: linear-gradient(135deg, #ffffff 0%, #e0f7fa 100%);
}
.stTextArea textarea {
border: 2px solid #81D4FA;
border-radius: 15px;
padding: 10px;
font-family: 'Segoe UI', sans-serif;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
width: 100%;
max-width: 800px;
margin: 0 auto;
}
.stTextArea textarea:focus {
border-color: #29B6F6;
box-shadow: 0 0 10px #4FC3F7;
}
.stTitle {
color: #0288D1;
font-family: 'Segoe UI', sans-serif;
font-size: 3em !important;
text-align: center;
margin-bottom: 30px !important;
}
.stButton>button {
background-color: #29B6F6;
color: white;
border-radius: 20px;
border: none;
padding: 10px 25px;
font-size: 16px;
font-weight: bold;
box-shadow: 0 4px 12px rgba(41, 182, 246, 0.4);
transition: all 0.3s ease;
}
.stButton>button:hover {
background-color: #0288D1;
box-shadow: 0 6px 14px rgba(2, 136, 209, 0.5);
transform: translateY(-2px);
}
.summary-container {
background-color: #ffffff;
border-radius: 15px;
padding: 20px;
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.05);
border-left: 5px solid #29B6F6;
margin-top: 20px;
width: 100%;
max-width: 800px;
margin-left: auto;
margin-right: auto;
}
.summary-title {
color: #0288D1;
font-weight: bold;
font-size: 1.5em;
margin-bottom: 10px;
font-family: 'Segoe UI', sans-serif;
}
.footer {
text-align: center;
margin-top: 50px;
padding: 20px;
color: #0288D1;
font-style: italic;
}
</style>
""", unsafe_allow_html=True)
# Load model and tokenizer
model_path = "./saved_model"
tokenizer_path = "./saved_tokenizer" # Define this path for saved tokenizer
try:
    # Check if the tokenizer is saved; if not, add custom tokens and save it
    if not os.path.exists(tokenizer_path):
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        tokenizer.add_tokens(['<extra_id_99>'])  # Add custom token if required
        tokenizer.save_pretrained(tokenizer_path)
    else:
        tokenizer = T5Tokenizer.from_pretrained(tokenizer_path, local_files_only=True)

    model = T5ForConditionalGeneration.from_pretrained(model_path, local_files_only=True, ignore_mismatched_sizes=True)
    device = torch.device("cpu")
    model.to(device)
    model_loaded = True
except Exception as e:
    st.error(f"Error loading model: {e}")
    model_loaded = False
def generate_summary(text):
    """Summarize the input text with the loaded T5 model using beam search."""
    try:
        inputs = ["summarize: " + text]
        inputs = tokenizer(inputs, max_length=1024, truncation=True, return_tensors="pt").to(device)
        outputs = model.generate(
            inputs.input_ids,
            max_length=150,
            length_penalty=2.0,
            num_beams=4,
            early_stopping=True
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        return None
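# Build the page: title, header image, and text input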
st.title("🧠 Smart Text Summarizer")
st.markdown("""
<div style="text-align: center; margin-bottom: 30px;">
<img src="https://api.placeholder.com/300x150?text=Smart+Summary" width="300" class="header-image">
</div>
""", unsafe_allow_html=True)
text = st.text_area("Enter the text you want to summarize...", height=200)
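# Center the action button using a three-column layout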
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    if st.button("🔍 Generate Summary"):
        if text and model_loaded:
            with st.spinner("Generating summary..."):
                summary = generate_summary(text)
                if summary:
                    st.markdown(
                        f'<div class="summary-container"><div class="summary-title">📋 Summary</div>{summary}</div>',
                        unsafe_allow_html=True
                    )
                else:
                    st.error("❌ Failed to generate summary. Please check your input.")
        elif not model_loaded:
            st.error("❌ Failed to load model. Please check the application logs.")
        else:
            st.warning("⚠️ Please enter text to summarize.")
st.markdown("""
<div class="footer">
Smart Text Summarizer - Crafted by hadheedo
</div>
""", unsafe_allow_html=True)