hadheedo commited on
Commit
f56c633
·
verified ·
1 Parent(s): 8619426

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -30
app.py CHANGED
@@ -1,30 +1,52 @@
1
- import streamlit as st
2
- from transformers import T5Tokenizer, T5ForConditionalGeneration
3
- import torch
4
-
5
-
6
- model_path = "./saved_model"
7
- tokenizer_path = "./saved_tokenizer"
8
-
9
- tokenizer = T5Tokenizer.from_pretrained(tokenizer_path)
10
- model = T5ForConditionalGeneration.from_pretrained(model_path)
11
-
12
- # دالة توليد الملخص
13
- def generate_summary(text):
14
- inputs = ["summarize: " + text]
15
- inputs = tokenizer(inputs, max_length=1024, truncation=True, return_tensors="pt")
16
- outputs = model.generate(inputs.input_ids.to(model.device), max_length=150, length_penalty=2.0, num_beams=4, early_stopping=True)
17
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
18
-
19
- # واجهة المستخدم باستخدام Streamlit
20
- st.title("application summarize")
21
-
22
- text = st.text_area("enter text you want to summarize.")
23
-
24
- if st.button("summary:"):
25
- if text:
26
- summary = generate_summary(text)
27
- st.write("summary")
28
- st.write(summary)
29
- else:
30
- st.write("please enter text.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch

# Paths to the locally saved fine-tuned model and its tokenizer.
model_path = "./saved_model"
tokenizer_path = "./saved_tokenizer"


@st.cache_resource(show_spinner=False)
def _load_model_and_tokenizer():
    """Load the T5 tokenizer and model once per server process.

    Streamlit re-executes the whole script on every user interaction;
    without caching, the model weights would be reloaded from disk on
    each rerun. ``st.cache_resource`` keeps one shared copy alive.

    Returns:
        (tokenizer, model, device) ready for CPU inference.
    """
    tok = T5Tokenizer.from_pretrained(tokenizer_path)
    mdl = T5ForConditionalGeneration.from_pretrained(model_path)
    dev = torch.device("cpu")
    mdl.to(dev)
    mdl.eval()  # inference only — disables dropout
    return tok, mdl, dev


# Load once; on failure surface the error in the UI and let the rest of
# the script degrade gracefully via the model_loaded flag.
try:
    tokenizer, model, device = _load_model_and_tokenizer()
    model_loaded = True
except Exception as e:
    st.error(f"Error loading model: {e}")
    model_loaded = False
# Function to generate summary
def generate_summary(text):
    """Return an abstractive summary of *text*, or None on failure.

    The input is prefixed with the "summarize: " task instruction that T5
    expects, truncated to 1024 tokens, and decoded with beam search
    (4 beams, length penalty 2.0, at most 150 output tokens).

    Args:
        text: Raw input text to summarize.

    Returns:
        The decoded summary string, or None if generation raised.
    """
    try:
        prompt = "summarize: " + text
        inputs = tokenizer(
            prompt,
            max_length=1024,
            truncation=True,
            return_tensors="pt",
        ).to(device)
        # no_grad: pure inference — skips autograd bookkeeping and memory.
        with torch.no_grad():
            outputs = model.generate(
                inputs.input_ids,
                # Pass the attention mask explicitly; omitting it triggers a
                # Hugging Face warning and can degrade output with padding.
                attention_mask=inputs.attention_mask,
                max_length=150,
                length_penalty=2.0,
                num_beams=4,
                early_stopping=True,
            )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary — report the
        # error to the user rather than crashing the Streamlit script.
        st.error(f"Error generating summary: {e}")
        return None
# --- Streamlit user interface --------------------------------------------
st.title("Text Summarization Application")

st.markdown("Enter the text you want to summarize, and the application will generate a concise summary.")

user_text = st.text_area("Enter text here:", height=200)

if st.button("Summarize"):
    # Guard clauses: report the blocking condition, otherwise summarize.
    if not model_loaded:
        st.error("Model failed to load. Please check the application logs.")
    elif not user_text:
        st.warning("Please enter text.")
    else:
        with st.spinner("Generating summary..."):
            result = generate_summary(user_text)
        if result:
            st.subheader("Summary:")
            st.write(result)
        else:
            st.warning("Summary generation failed. Please check the input text.")

st.markdown("---")
st.markdown("This application was developed using the T5 model.")