Update app.py
app.py
CHANGED
@@ -1,103 +1,99 @@
(Previous version removed: the same code with Korean-language comments and a Korean OCR failure message.)

Updated app.py:
# Import necessary libraries
import gradio as gr  # Gradio: library for building web interfaces
import requests  # Library for sending HTTP API requests
from openai import OpenAI  # OpenAI-compatible client for the Upstage Solar LLM
from io import BytesIO  # For handling image data in memory

def extract_text_from_image(image, api_key):
    """
    Extract text from an image using the Upstage Document OCR API.
    """
    # Guard against an empty input (e.g., when the uploaded image is cleared)
    if image is None:
        return ""

    # Upstage API endpoint
    url = "https://api.upstage.ai/v1/document-digitization"

    # Set up headers for API key authentication
    headers = {'Authorization': f'Bearer {api_key}'}

    # Save the image to an in-memory buffer (JPEG format)
    buffer = BytesIO()
    image.save(buffer, format="JPEG")
    buffer.seek(0)

    # Prepare files and form data for the request
    files = {"document": ("image.jpg", buffer, "image/jpeg")}
    data = {"model": "ocr"}  # Model to use: OCR

    # Send the POST request
    response = requests.post(url, headers=headers, files=files, data=data)

    # If the request succeeds, extract the text
    if response.status_code == 200:
        text = response.json().get("text", "")  # Extract text from the JSON response
        return text.strip()  # Remove leading/trailing whitespace and return
    else:
        # Return an error message on failure
        return f"OCR Failed: {response.status_code} - {response.text}"


def translate_text_with_solar(korean_text, api_key):
    """
    Translate Korean text into English using the Upstage Solar Pro API.
    """
    # Initialize the OpenAI-compatible client for calling the Solar LLM
    client = OpenAI(
        api_key=api_key,
        base_url="https://api.upstage.ai/v1"
    )

    # Construct the prompt for the model
    prompt = f"""
    Below is a handwritten letter in Korean.

    {korean_text}

    Please translate it into English.

    Translated letter in English:
    """

    # Call the Solar LLM to perform the translation
    response = client.chat.completions.create(
        model="solar-pro",  # Model to use
        messages=[{"role": "user", "content": prompt}],  # User message
        temperature=0.5,  # Creativity level (0.0~1.0)
        max_tokens=1024  # Maximum response length
    )

    # Return the translated text
    return response.choices[0].message.content


# Gradio interface layout
with gr.Blocks() as demo:
    # Header description
    gr.Markdown("# Handwritten Letter Translator")
    gr.Markdown("Upload a letter image to extract Korean text using Upstage Document OCR.\nClick the Translate button to translate it into English using the Solar LLM!")
    gr.Markdown("The example images are AI-generated. Click the Files button to view or download them.")

    # API key input
    api_key_input = gr.Textbox(label="Upstage API Key", type="password", placeholder="Paste your API key here")

    # Layout: two-column format
    with gr.Row():
        # Left column: image upload
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="Upload Letter Image")

        # Right column: extracted text and translation
        with gr.Column(scale=2):
            english_box = gr.Textbox(label="Extracted Korean Text", lines=10)
            translate_button = gr.Button("Translate")
            korean_box = gr.Textbox(label="Translated English Text", lines=10)

    # Step 1: run OCR when an image is uploaded and show the extracted text
    image_input.change(fn=extract_text_from_image, inputs=[image_input, api_key_input], outputs=english_box)

    # Step 2: run the translation when the button is clicked and show the result
    translate_button.click(fn=translate_text_with_solar, inputs=[english_box, api_key_input], outputs=korean_box)

# Run the app
if __name__ == "__main__":
    demo.launch()
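For quick local testing, the two functions can also be exercised without launching the Gradio interface. The snippet below is a minimal sketch meant to run in the same Python session as the code above (or with the functions imported from app.py); the UPSTAGE_API_KEY environment variable and the letter.jpg sample file are assumptions for illustration, not part of this Space.

# Minimal local test of the two functions, bypassing the Gradio UI.
# Assumptions (not part of the Space): a valid key in the UPSTAGE_API_KEY
# environment variable and a sample image saved as letter.jpg.
import os
from PIL import Image

api_key = os.environ["UPSTAGE_API_KEY"]
letter = Image.open("letter.jpg").convert("RGB")  # convert to RGB so JPEG saving works

korean_text = extract_text_from_image(letter, api_key)
print("Extracted Korean text:\n", korean_text)

english_text = translate_text_with_solar(korean_text, api_key)
print("English translation:\n", english_text)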