Update app.py
app.py CHANGED
@@ -31,14 +31,30 @@ st.markdown("""
 @st.cache_resource
 def load_models():
     try:
+        text_classification_model = pipeline(
+            "text-classification",
+            model="distilbert-base-uncased-finetuned-sst-2-english"
+        )
+
+        question_answering_model = pipeline(
+            "question-answering",
+            model="distilbert-base-uncased-distilled-squad"
+        )
+
+        translation_model = pipeline(
+            "translation",
+            model="Helsinki-NLP/opus-mt-en-fr"
+        )
+
         summarization_model = pipeline(
             "summarization",
             model="facebook/bart-large-cnn"
         )
+
     except Exception as e:
         raise RuntimeError(f"Failed to load models: {str(e)}")
 
-    return summarization_model
+    return text_classification_model, question_answering_model, translation_model, summarization_model
 
 def extract_text_from_pdf(uploaded_file):
     try:
@@ -76,7 +92,7 @@ def extract_text_from_file(uploaded_file, file_type):
     return None
 
 try:
-    summarization_model = load_models()
+    classification_model, qa_model, translation_model, summarization_model = load_models()
 except Exception as e:
     st.error(f"An error occurred while loading models: {e}")
 
@@ -113,13 +129,74 @@ if option == "Text Summarization":
 
 elif option == "Question Answering":
     st.title("Question Answering")
-    st.
+    st.markdown("<h4 style='font-size: 20px;'>- because Google wasn't enough π</h4>", unsafe_allow_html=True)
+
+    uploaded_file = st.file_uploader("Upload a document (PDF, DOCX, TXT) for context (optional)", type=["pdf", "docx", "txt"])
+
+    context_input = st.text_area("Enter context (or leave empty if uploading a file):")
+    question = st.text_input("Enter your question:")
+
+    if uploaded_file:
+        file_type = uploaded_file.name.split(".")[-1].lower()
+        context_input = extract_text_from_file(uploaded_file, file_type)
+
+    if st.button("Get Answer"):
+        with st.spinner('Finding answer...'):
+            try:
+                if context_input and question:
+                    answer = qa_model(question=question, context=context_input)
+                    st.write("Answer:", answer['answer'])
+                    st.balloons()
+                else:
+                    st.error("Please enter both context and a question.")
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
 
 elif option == "Text Classification":
     st.title("Text Classification")
-    st.
+    st.markdown("<h4 style='font-size: 20px;'>- where machines learn to hate spam as much as we do π</h4>", unsafe_allow_html=True)
+
+    text = st.text_area("Enter text for classification:")
+
+    if st.button("Classify Text"):
+        with st.spinner('Classifying text...'):
+            try:
+                classification = classification_model(text)
+                st.json(classification)
+                st.balloons()
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
 
 elif option == "Language Translation":
-    st.title("Language Translation")
-    st.
+    st.title("Language Translation (English to Multiple Languages)")
+    st.markdown("<h4 style='font-size: 20px;'>- when 'translate' is the only button you know π</h4>", unsafe_allow_html=True)
+
+    target_language = st.selectbox("Choose target language", ["French", "Spanish", "German", "Italian", "Portuguese", "Hindi"])
+
+    language_models = {
+        "French": "Helsinki-NLP/opus-mt-en-fr",
+        "Spanish": "Helsinki-NLP/opus-mt-en-es",
+        "German": "Helsinki-NLP/opus-mt-en-de",
+        "Italian": "Helsinki-NLP/opus-mt-en-it",
+        "Portuguese": "Helsinki-NLP/opus-mt-en-pt",
+        "Hindi": "Helsinki-NLP/opus-mt-en-hi"
+    }
+
+    selected_model = language_models.get(target_language)
+    translation_pipeline = pipeline("translation", model=selected_model)
+
+    text_to_translate = st.text_area(f"Enter text to translate from English to {target_language}:")
+
+    if st.button("Translate"):
+        with st.spinner('Translating...'):
+            try:
+                if text_to_translate:
+                    translated_text = translation_pipeline(text_to_translate)
+                    st.write(f"Translated Text ({target_language}):", translated_text[0]['translation_text'])
+                    st.balloons()
+                else:
+                    st.error("Please enter text to translate.")
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
+
 
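Note on the Language Translation branch above (not part of this commit): `pipeline("translation", model=selected_model)` runs in the script body, so Streamlit reloads the selected checkpoint on every rerun (each widget interaction). A minimal sketch of how that lookup could reuse the same `@st.cache_resource` pattern the commit applies in `load_models()`; the helper name `get_translation_pipeline` is hypothetical, not from the app:

```python
import streamlit as st
from transformers import pipeline


@st.cache_resource
def get_translation_pipeline(model_name: str):
    # Cached per model_name, so each Helsinki-NLP checkpoint is loaded
    # at most once per process instead of on every Streamlit rerun.
    return pipeline("translation", model=model_name)


# Hypothetical usage inside the "Language Translation" branch, replacing
# the direct pipeline(...) call:
# translation_pipeline = get_translation_pipeline(language_models[target_language])
```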