import gradio as gr
import requests
import os
from openai import OpenAI
from io import BytesIO
UPSTAGE_API_KEY = os.getenv("UPSTAGE_API_KEY")
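# The Upstage API key is read from the environment; when this runs as a Hugging Face
# Space it is typically configured as a repository secret named UPSTAGE_API_KEY.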
def extract_text_from_image(image):
    # Send the uploaded image to Upstage Document OCR and return the extracted text.
    url = "https://api.upstage.ai/v1/document-digitization"
    headers = {'Authorization': f'Bearer {UPSTAGE_API_KEY}'}

    # Serialize the PIL image to an in-memory JPEG buffer
    # (convert to RGB first so images with an alpha channel can be saved as JPEG).
    buffer = BytesIO()
    image.convert("RGB").save(buffer, format="JPEG")
    buffer.seek(0)

    files = {"document": ("image.jpg", buffer, "image/jpeg")}
    data = {"model": "ocr"}
    response = requests.post(url, headers=headers, files=files, data=data)

    if response.status_code == 200:
        text = response.json().get("text", "")
        return text.strip()
    else:
        return f"OCR failed: {response.status_code} - {response.text}"
def translate_text_with_solar(english_text):
    # Initialize the OpenAI-compatible client for the Upstage Solar LLM
    client = OpenAI(
        api_key=UPSTAGE_API_KEY,
        base_url="https://api.upstage.ai/v1"
    )
    print("== Chat function called ==")

    prompt = f"""
The following is the content of a handwritten English letter.

{english_text}

Please translate the English into Korean.

Letter content translated into Korean: """

    response = client.chat.completions.create(
        model="solar-pro",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.5,
        max_tokens=1024
    )
    print(response)
    return response.choices[0].message.content
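# Usage sketch (not executed by the app; the sample string is illustrative only):
#   print(translate_text_with_solar("Dear Jane, I hope this letter finds you well."))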
with gr.Blocks(title="Handwritten Letter Translator") as demo:
    gr.Markdown("Handwritten Letter Translator")
    gr.Markdown(
        "Upload a letter image and Upstage Document OCR extracts the English text.\n"
        "Press the Translate button to call the Solar LLM and translate it into Korean!"
    )

    with gr.Row():
        # Left column: image upload
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="Upload letter image")

        # Right column: text results
        with gr.Column(scale=2):
            english_box = gr.Textbox(label="Extracted English text", lines=10)
            translate_button = gr.Button("Translate")
            korean_box = gr.Textbox(label="🇰🇷 Translated Korean text", lines=10)

    # Step 1: run OCR when an image is uploaded
    image_input.change(fn=extract_text_from_image, inputs=image_input, outputs=english_box)

    # Step 2: translate when the button is clicked
    translate_button.click(fn=translate_text_with_solar, inputs=english_box, outputs=korean_box)
if __name__ == "__main__":
    demo.launch()
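# Assumed dependencies (sketch, not part of the original file): gradio, requests, openai,
# typically pinned in the Space's requirements.txt.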