Update app.py
Browse files
app.py
CHANGED
@@ -4,12 +4,13 @@ import torch
|
|
# Load the fine-tuned model and its tokenizer from local directories.
# Paths are relative to the app's working directory.
model_path = "./saved_model"
tokenizer_path = "./saved_tokenizer"

try:
    tokenizer = T5Tokenizer.from_pretrained(tokenizer_path)
    model = T5ForConditionalGeneration.from_pretrained(model_path)
    # Pin inference to CPU so the app runs on hosts without a GPU.
    device = torch.device("cpu")
    model.to(device)
    model_loaded = True
except Exception as e:
    # Surface the failure in the UI instead of crashing the script.
    # model_loaded is checked before any summarization attempt; without
    # this fallback a failed load would raise NameError later.
    st.error(f"Error loading model: {e}")
    model_loaded = False
|
@@ -19,64 +20,28 @@ except Exception as e:
|
|
19 |
def generate_summary(text):
    """Summarize *text* with the loaded T5 model.

    Returns the decoded summary string, or None if generation fails
    (the error is reported to the UI via st.error).
    """
    try:
        # T5 is a text-to-text model: the task is selected by a prefix.
        batch = ["summarize: " + text]
        # Tokenize (truncating long inputs) and move tensors to the
        # same device the model lives on.
        encoded = tokenizer(batch, max_length=1024, truncation=True, return_tensors="pt").to(device)
        outputs = model.generate(
            encoded.input_ids,
            max_length=150,
            length_penalty=2.0,   # >1 favors longer summaries
            num_beams=4,
            early_stopping=True,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        return None
|
# --- Page theme -------------------------------------------------------------
# NOTE(review): the primary color definition was truncated in the diff this
# file was recovered from; "#9370DB" (medium purple) fits the lavender theme
# below — confirm against the original commit.
primary_color = "#9370DB"
secondary_color = "#E6E6FA"  # light lavender
text_color = "#4A4A4A"       # dark gray

st.set_page_config(page_title="Text Summarizer", page_icon=":sparkles:", layout="wide")

# Inject CSS overrides for the Streamlit widgets (requires
# unsafe_allow_html=True, since st.markdown escapes HTML by default).
st.markdown(
    f"""
    <style>
    body {{
        color: {text_color};
        background-color: {secondary_color};
    }}
    .stApp {{
        background-color: {secondary_color};
    }}
    .stButton button {{
        background-color: {primary_color};
        color: white;
    }}
    .stTextArea textarea {{
        background-color: white;
    }}
    .stTextInput input {{
        background-color: white;
    }}
    </style>
    """,
    unsafe_allow_html=True,
)

st.title("✨ Text Summarization Application ✨")

st.markdown("Enter the text you want to summarize, and the application will generate a concise summary.")

text = st.text_area("Enter text here:", height=200)

if st.button("Summarize"):
    if text and model_loaded:
        with st.spinner("Generating summary..."):
            summary = generate_summary(text)
            if summary:
                # NOTE(review): this heading call was truncated ("st.") in the
                # recovered diff; a subheader matches the surrounding layout.
                st.subheader("Summary")
                st.write(summary)
            else:
                st.warning("Summary generation failed. Please check the input text.")
    elif not model_loaded:
        st.error("Model failed to load. Please check the application logs.")
    else:
        st.warning("Please enter text.")

st.markdown("---")
|
|
|
# Load the model and tokenizer from local directories.
model_path = "./saved_model"
tokenizer_path = "./saved_tokenizer"

try:
    tokenizer = T5Tokenizer.from_pretrained(tokenizer_path)
    model = T5ForConditionalGeneration.from_pretrained(model_path)
    device = torch.device("cpu")  # pin to CPU so no GPU is required
    model.to(device)              # move the model to the CPU device
    model_loaded = True
except Exception as e:
    # Report the failure in the UI; the fallback flag prevents a
    # NameError when the button handler checks model_loaded later.
    st.error(f"Error loading model: {e}")
    model_loaded = False
|
|
|
def generate_summary(text):
    """Summarize *text* with the loaded T5 model.

    Returns the decoded summary string, or None on failure (the error
    is shown in the UI via st.error).
    """
    try:
        # T5 selects its task via a text prefix on the input.
        batch = ["summarize: " + text]
        # Tokenize and move the input tensors to the CPU device the
        # model was placed on.
        encoded = tokenizer(batch, max_length=1024, truncation=True, return_tensors="pt").to(device)
        outputs = model.generate(
            encoded.input_ids,
            max_length=150,
            length_penalty=2.0,
            num_beams=4,
            early_stopping=True,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        return None
|
# Streamlit user interface
st.title("application summarize")

text = st.text_area("enter text you want to summarize.")

if st.button("summary:"):
    if text and model_loaded:
        with st.spinner("Generating summary..."):
            summary = generate_summary(text)
            if summary:
                st.write("summary")
                st.write(summary)
            else:
                st.warning("Summary generation failed. Please check the input text.")
    elif not model_loaded:
        st.error("Model failed to load. Please check the application logs.")
    else:
        st.warning("Please enter text.")
|
|
|
|