Update app.py
app.py
CHANGED
@@ -169,92 +169,92 @@ Models = {
 
 st.sidebar.markdown(f"### Selected Model: {Models[Lng]}")
 
-
-if not img_file.endswith(".pdf"):
-# … old lines 174-211 were not preserved in this render …
-    st.write(ocr_text)
-    text_file = BytesIO(ocr_text.encode())
-    st.download_button('Download Text', text_file,
-                       file_name='ocr_text.txt')
-
-elif input_file is not "" or img_file.endswith(".pdf"):
-    button = st.sidebar.button("Run OCR")
-
-    if button:
-        with st.spinner('Running OCR...'):
-            ocr_text = inference_nougat(None, input_file)
-            st.subheader(f"OCR Results for the PDF file")
-            st.write(ocr_text)
-            text_file = BytesIO(ocr_text.encode())
-            st.download_button('Download Text', text_file,
-                               file_name='ocr_text.txt')
-
-# … old lines 228-260 were not preserved in this render …
+if img_file:
+    if not img_file.endswith(".pdf"):
+        cropped_img = Image.open(img_file)
+        if not realtime_update:
+            st.write("Double click to save crop")
+
+        # col1, col2 = st.columns(2)
+        # with col1:
+        #     st.subheader("Input: Upload and Crop Your Image")
+        #     # Get a cropped image from the frontend
+        #     cropped_img = st_cropper(
+        #         img,
+        #         realtime_update=realtime_update,
+        #         box_color="#FF0000",
+        #         aspect_ratio=aspect_ratio,
+        #         should_resize_image=True,
+        #     )
+
+        # with col2:
+        #     # Manipulate cropped image at will
+        #     st.subheader("Output: Preview and Analyze")
+        #     # _ = cropped_img.thumbnail((150, 150))
+        #     st.image(cropped_img)
+
+        button = st.sidebar.button("Run OCR")
+
+        if button:
+            with st.spinner('Running OCR...'):
+                if Lng == "Arabic":
+                    ocr_text = predict_arabic(cropped_img)
+                elif Lng == "English":
+                    ocr_text = predict_nougat(cropped_img)
+                elif Lng == "French":
+                    ocr_text = predict_tesseract(cropped_img)
+                elif Lng == "Korean":
+                    ocr_text = predict_english(cropped_img)
+                elif Lng == "Chinese":
+                    ocr_text = predict_english(cropped_img)
+
+                st.subheader(f"OCR Results for {Lng}")
+                st.write(ocr_text)
+                text_file = BytesIO(ocr_text.encode())
+                st.download_button('Download Text', text_file,
+                                   file_name='ocr_text.txt')
+
+    elif input_file != "" or img_file.endswith(".pdf"):
+        button = st.sidebar.button("Run OCR")
+
+        if button:
+            with st.spinner('Running OCR...'):
+                ocr_text = inference_nougat(None, input_file)
+                st.subheader("OCR Results for the PDF file")
+                st.write(ocr_text)
+                text_file = BytesIO(ocr_text.encode())
+                st.download_button('Download Text', text_file,
+                                   file_name='ocr_text.txt')
+
+# openai.api_key = ""
+
+# if "openai_model" not in st.session_state:
+#     st.session_state["openai_model"] = "gpt-3.5-turbo"
+
+# if "messages" not in st.session_state:
+#     st.session_state.messages = []
+
+# for message in st.session_state.messages:
+#     with st.chat_message(message["role"]):
+#         st.markdown(message["content"])
+
+# if prompt := st.chat_input("How can I help?"):
+#     st.session_state.messages.append({"role": "user", "content": ocr_text + prompt})
+#     with st.chat_message("user"):
+#         st.markdown(prompt)
+
+#     with st.chat_message("assistant"):
+#         message_placeholder = st.empty()
+#         full_response = ""
+#         for response in openai.ChatCompletion.create(
+#             model=st.session_state["openai_model"],
+#             messages=[
+#                 {"role": m["role"], "content": m["content"]}
+#                 for m in st.session_state.messages
+#             ],
+#             stream=True,
+#         ):
+#             full_response += response.choices[0].delta.get("content", "")
+#             message_placeholder.markdown(full_response + "▌")
+#         message_placeholder.markdown(full_response)
+#     st.session_state.messages.append({"role": "assistant", "content": full_response})
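The if/elif ladder at new lines 200-209 routes the sidebar language to a per-language backend, with Korean and Chinese currently falling through to predict_english. A minimal sketch of the same routing as a lookup table, assuming the predict_* helpers defined earlier in app.py keep their one-argument signatures:

# A sketch only, not the committed code: the same language-to-backend
# routing as the if/elif chain above, expressed as a lookup table.
PREDICTORS = {
    "Arabic": predict_arabic,
    "English": predict_nougat,
    "French": predict_tesseract,
    "Korean": predict_english,   # placeholder backend, as in the diff
    "Chinese": predict_english,  # placeholder backend, as in the diff
}

if button:
    with st.spinner('Running OCR...'):
        # Unmapped selections fall back to the English predictor.
        ocr_text = PREDICTORS.get(Lng, predict_english)(cropped_img)

A table like this keeps the predictor choice in one obvious place when further languages are added to the Models dict.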
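The download path buffers the OCR text in memory with BytesIO so nothing is written to disk. A minimal standalone sketch of the same pattern, where ocr_text is a hypothetical stand-in for the real result:

from io import BytesIO

import streamlit as st

ocr_text = "recognized text"  # hypothetical stand-in for the OCR result

# BytesIO hands st.download_button a file-like object, as in the diff;
# the str or its encoded bytes could also be passed directly as data.
st.download_button('Download Text', BytesIO(ocr_text.encode()),
                   file_name='ocr_text.txt', mime='text/plain')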