Update app.py
Browse files
app.py
CHANGED
@@ -6,6 +6,7 @@ from transformers import AutoModelForCausalLM, AutoProcessor
|
|
6 |
import torch
|
7 |
import subprocess
|
8 |
from io import BytesIO
|
|
|
9 |
|
10 |
# Install flash-attn
|
11 |
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
|
@@ -52,12 +53,11 @@ def solve_math_problem(image):
|
|
52 |
model.to('cpu')
|
53 |
return response
|
54 |
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
# Custom CSS
|
62 |
custom_css = """
|
63 |
<style>
|
@@ -243,12 +243,12 @@ with gr.Blocks(css=custom_css) as iface:
|
|
243 |
|
244 |
gr.Examples(
|
245 |
examples=[
|
246 |
-
"eqn1.png",
|
247 |
-
"eqn2.png"
|
248 |
],
|
249 |
inputs=input_image,
|
250 |
outputs=output_text,
|
251 |
-
fn=
|
252 |
cache_examples=True,
|
253 |
)
|
254 |
|
|
|
6 |
import torch
|
7 |
import subprocess
|
8 |
from io import BytesIO
|
9 |
+
import os
|
10 |
|
11 |
# Install flash-attn
|
12 |
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
|
|
|
53 |
model.to('cpu')
|
54 |
return response
|
55 |
|
56 |
+
def load_image_from_file(file_path):
    """Open and return the image stored at *file_path*.

    Parameters
    ----------
    file_path : str | os.PathLike
        Path to the image file on disk.

    Returns
    -------
    PIL.Image.Image
        The opened (lazily loaded) image object.

    Raises
    ------
    FileNotFoundError
        If no file exists at *file_path*.
    """
    # EAFP: attempt the open directly instead of an os.path.exists()
    # pre-check — the check/open pair is a TOCTOU race (the file could
    # vanish between the two calls) and Image.open already raises
    # FileNotFoundError for a missing path.
    try:
        return Image.open(file_path)
    except FileNotFoundError:
        # Re-raise with the original descriptive message; `from None`
        # keeps the traceback free of the redundant inner exception.
        raise FileNotFoundError(f"Image file not found: {file_path}") from None
|
|
|
61 |
# Custom CSS
|
62 |
custom_css = """
|
63 |
<style>
|
|
|
243 |
|
244 |
gr.Examples(
|
245 |
examples=[
|
246 |
+
os.path.join(os.path.dirname(__file__), "eqn1.png"),
|
247 |
+
os.path.join(os.path.dirname(__file__), "eqn2.png")
|
248 |
],
|
249 |
inputs=input_image,
|
250 |
outputs=output_text,
|
251 |
+
fn=solve_math_problem,
|
252 |
cache_examples=True,
|
253 |
)
|
254 |
|