import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import os

# Custom CSS styling for a light, elegant design
st.markdown("""
<style>
    .main {
        background-color: #FAFAFA;
        background-image: linear-gradient(135deg, #ffffff 0%, #e0f7fa 100%);
    }
    .stTextArea textarea {
        border: 2px solid #81D4FA;
        border-radius: 15px;
        padding: 10px;
        font-family: 'Segoe UI', sans-serif;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
        width: 100%;
        max-width: 800px;
        margin: 0 auto;
    }
    .stTextArea textarea:focus {
        border-color: #29B6F6;
        box-shadow: 0 0 10px #4FC3F7;
    }
    .stTitle {
        color: #0288D1;
        font-family: 'Segoe UI', sans-serif;
        font-size: 3em !important;
        text-align: center;
        margin-bottom: 30px !important;
    }
    .stButton>button {
        background-color: #29B6F6;
        color: white;
        border-radius: 20px;
        border: none;
        padding: 10px 25px;
        font-size: 16px;
        font-weight: bold;
        box-shadow: 0 4px 12px rgba(41, 182, 246, 0.4);
        transition: all 0.3s ease;
    }
    .stButton>button:hover {
        background-color: #0288D1;
        box-shadow: 0 6px 14px rgba(2, 136, 209, 0.5);
        transform: translateY(-2px);
    }
    .summary-container {
        background-color: #ffffff;
        border-radius: 15px;
        padding: 20px;
        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.05);
        border-left: 5px solid #29B6F6;
        margin-top: 20px;
        width: 100%;
        max-width: 800px;
        margin-left: auto;
        margin-right: auto;
    }
    .summary-title {
        color: #0288D1;
        font-weight: bold;
        font-size: 1.5em;
        margin-bottom: 10px;
        font-family: 'Segoe UI', sans-serif;
    }
    .footer {
        text-align: center;
        margin-top: 50px;
        padding: 20px;
        color: #0288D1;
        font-style: italic;
    }
</style>
""", unsafe_allow_html=True)

# Load model and tokenizer
model_path = "./saved_model"
tokenizer_path = "./saved_tokenizer"  # Define this path for saved tokenizer

try:
    # Load the saved tokenizer if it exists; otherwise build it from the base
    # t5-small tokenizer and save it for later runs.
    if not os.path.exists(tokenizer_path):
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        # <extra_id_99> is already part of T5's standard sentinel vocabulary
        # (<extra_id_0>..<extra_id_99>), so this is effectively a no-op; adding a
        # genuinely new token would also require model.resize_token_embeddings(len(tokenizer)).
        tokenizer.add_tokens(['<extra_id_99>'])
        tokenizer.save_pretrained(tokenizer_path)
    else:
        tokenizer = T5Tokenizer.from_pretrained(tokenizer_path, local_files_only=True)

    model = T5ForConditionalGeneration.from_pretrained(model_path, local_files_only=True, ignore_mismatched_sizes=True)
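    # Inference is pinned to CPU here; using
    # torch.device("cuda" if torch.cuda.is_available() else "cpu") would pick up a GPU when available.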
    device = torch.device("cpu")
    model.to(device)
    model_loaded = True
except Exception as e:
    st.error(f"Error loading model: {e}")
    model_loaded = False

def generate_summary(text):
    try:
        # T5 expects a task prefix; "summarize: " tells the model to summarize the input.
        inputs = tokenizer(
            ["summarize: " + text],
            max_length=1024,
            truncation=True,
            return_tensors="pt",
        ).to(device)
        # Beam search with a length penalty favors longer, more complete summaries.
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=150,
            length_penalty=2.0,
            num_beams=4,
            early_stopping=True,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        return None

st.title("🧠 Smart Text Summarizer")

st.markdown("""
<div style="text-align: center; margin-bottom: 30px;">
    <img src="https://api.placeholder.com/300x150?text=Smart+Summary" width="300" class="header-image">
</div>
""", unsafe_allow_html=True)

text = st.text_area("Enter the text you want to summarize...", height=200)

col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    if st.button("🔍 Generate Summary"):
        if text and model_loaded:
            with st.spinner("Generating summary..."):
                summary = generate_summary(text)
                if summary:
                    st.markdown('<div class="summary-container"><div class="summary-title">📋 Summary</div>' + 
                                summary + '</div>', unsafe_allow_html=True)
                else:
                    st.error("❌ Failed to generate summary. Please check your input.")
        elif not model_loaded:
            st.error("❌ Failed to load model. Please check the application logs.")
        else:
            st.warning("⚠️ Please enter text to summarize.")

st.markdown("""
<div class="footer">
    Smart Text Summarizer - Crafted by hadheedo
</div>
""", unsafe_allow_html=True)