import gradio as gr
import requests
import os
from openai import OpenAI
from io import BytesIO

# The Upstage API key is read from the environment; set UPSTAGE_API_KEY before launching.
UPSTAGE_API_KEY = os.getenv("UPSTAGE_API_KEY")


def extract_text_from_image(image):
    """Send the uploaded PIL image to Upstage Document OCR and return the extracted text."""
    url = "https://api.upstage.ai/v1/document-digitization"
    headers = {"Authorization": f"Bearer {UPSTAGE_API_KEY}"}

    # Encode the PIL image as an in-memory JPEG for the multipart upload.
    buffer = BytesIO()
    image.save(buffer, format="JPEG")
    buffer.seek(0)

    files = {"document": ("image.jpg", buffer, "image/jpeg")}
    data = {"model": "ocr"}
    response = requests.post(url, headers=headers, files=files, data=data)

    if response.status_code == 200:
        text = response.json().get("text", "")
        return text.strip()
    else:
        return f"OCR ์‹คํŒจ: {response.status_code} - {response.text}"


def translate_text_with_solar(english_text):
    # Initialize the OpenAI-compatible client pointed at the Upstage Solar LLM endpoint.
    client = OpenAI(
        api_key=UPSTAGE_API_KEY,
        base_url="https://api.upstage.ai/v1"
    )
    print("== Chat function called ==")

    # Prompt asking Solar to translate the handwritten English letter into Korean.
    prompt = f"""
๋‹ค์Œ์€ ์˜์–ด ์†๊ธ€์”จ ํŽธ์ง€ ๋‚ด์šฉ์ž…๋‹ˆ๋‹ค.

{english_text}

์˜์–ด๋ฅผ ํ•œ๊ตญ์–ด๋กœ ๋ฒˆ์—ญํ•ด์ฃผ์„ธ์š”.

ํ•œ๊ตญ์–ด๋กœ ๋ฒˆ์—ญ๋œ ํŽธ์ง€ ๋‚ด์šฉ:
"""
    response = client.chat.completions.create(
        model="solar-pro",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.5,
        max_tokens=1024
    )
    print(response)
    return response.choices[0].message.content


with gr.Blocks(title="๐Ÿ’Œ ์†๊ธ€์”จ ํŽธ์ง€ ๋ฒˆ์—ญ๊ธฐ") as demo:
    gr.Markdown("๐Ÿ’Œ ์†๊ธ€์”จ ํŽธ์ง€ ๋ฒˆ์—ญ๊ธฐ")
    gr.Markdown("ํŽธ์ง€ ์ด๋ฏธ์ง€๋ฅผ ์—…๋กœ๋“œํ•˜๋ฉด Upstage Document OCR์ด ์˜์–ด ํ…์ŠคํŠธ๋ฅผ ์ถ”์ถœํ•˜๊ณ ,\n๐ŸŒ ๋ฒˆ์—ญํ•˜๊ธฐ ๋ฒ„ํŠผ์„ ๋ˆ„๋ฅด๋ฉด Solar LLM์„ ํ˜ธ์ถœํ•˜์—ฌ ํ•œ๊ตญ์–ด๋กœ ๋ฒˆ์—ญํ•ฉ๋‹ˆ๋‹ค!")

    with gr.Row():
        # Left column: image upload
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="๐Ÿ’Œ ํŽธ์ง€ ์ด๋ฏธ์ง€ ์—…๋กœ๋“œ")

        # Right column: extracted and translated text
        with gr.Column(scale=2):
            english_box = gr.Textbox(label="๐Ÿ“ ์ถ”์ถœ๋œ ์˜์–ด ํ…์ŠคํŠธ", lines=10)
            translate_button = gr.Button("๐ŸŒ ๋ฒˆ์—ญํ•˜๊ธฐ")
            korean_box = gr.Textbox(label="๐Ÿ‡ฐ๐Ÿ‡ท ๋ฒˆ์—ญ๋œ ํ•œ๊ตญ์–ด ํ…์ŠคํŠธ", lines=10)

    # Step 1: run OCR whenever a new image is uploaded
    image_input.change(fn=extract_text_from_image, inputs=image_input, outputs=english_box)

    # Step 2: translate the extracted text when the button is clicked
    translate_button.click(fn=translate_text_with_solar, inputs=english_box, outputs=korean_box)


if __name__ == "__main__":
    demo.launch()
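
# Note: when running locally, launch() also accepts share=True to create a temporary
# public link, e.g. demo.launch(share=True); the default call above is fine for Spaces.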