Yanqing0327 committed on
Commit 9bebba9 · verified · 1 Parent(s): 75864ef

Update app.py

Files changed (1)
  1. app.py +26 -45
app.py CHANGED
@@ -1,48 +1,29 @@
  import gradio as gr
  import requests
- import base64
- from PIL import Image
- import io

- # **Local GPU server API**
- LOCAL_SERVER_URL = "http://127.0.0.1:5000/infer"
-
- def image_to_base64(image):
-     """PIL Image -> Base64"""
-     buffer = io.BytesIO()
-     image.save(buffer, format="PNG")
-     return base64.b64encode(buffer.getvalue()).decode("utf-8")
-
- def llava_infer(image, text):
-     """Send the user's image and text to the local server"""
-     if image is None or text.strip() == "":
-         return "Please provide both an image and a text prompt"
-
-     image_base64 = image_to_base64(image)
-     payload = {"image": image_base64, "text": text}
-
-     try:
-         response = requests.post(LOCAL_SERVER_URL, json=payload)
-         response_data = response.json()
-         return response_data["response"]
-     except Exception as e:
-         return f"Server error: {e}"
-
- # **Gradio Web UI**
- with gr.Blocks(title="LLaVA Remote Web UI") as demo:
-     gr.Markdown("# 🌋 LLaVA Web Interface (Remote Inference)")
-     gr.Markdown("Upload an image and enter a text prompt; LLaVA will run inference on a remote GPU server")
-
-     with gr.Row():
-         with gr.Column(scale=3):
-             image_input = gr.Image(type="pil", label="Upload Image")
-             text_input = gr.Textbox(placeholder="Enter text...", label="Input Text")
-             submit_button = gr.Button("Submit")
-
-         with gr.Column(scale=7):
-             chatbot_output = gr.Textbox(label="LLaVA Output", interactive=False)
-
-     submit_button.click(fn=llava_infer, inputs=[image_input, text_input], outputs=chatbot_output)
-
- # **Launch the Hugging Face Web UI**
- demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
+ API_URL = "http://169.233.7.2/infer"
+
+ def process_image(image, text, conv_mode):
+     """Call the local API to run inference"""
+     if image is None:
+         return "Please upload an image."
+
+     files = {"image": image}
+     data = {"text": text, "conv_mode": conv_mode}
+
+     response = requests.post(API_URL, files=files, json=data)
+     return response.json().get("response", "Error processing the image.")
+
+ iface = gr.Interface(
+     fn=process_image,
+     inputs=[
+         gr.Image(type="file"),
+         gr.Textbox(label="Prompt"),
+         gr.Dropdown(choices=["vicuna_v1", "llama3"], label="Conversation Mode")
+     ],
+     outputs="text",
+     title="LLaVA Web Interface",
+     description="Upload an image and provide a prompt to interact with LLaVA."
+ )
+
+ iface.launch(server_name="0.0.0.0", server_port=7860)
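
Note on the new process_image: in requests, passing files= switches the request body to multipart/form-data, and a json= payload passed alongside it is dropped, so the server would receive only the file and not text or conv_mode; extra fields normally travel in data=. Also, depending on the Gradio version, gr.Image(type="file") may be deprecated in favor of type="filepath", which hands the function a path string rather than an open file. Below is a minimal client sketch under those assumptions; the endpoint URL and field names are taken from the commit, while the multipart layout (and the helper name query_llava) are assumptions, not the committed behavior.

import requests

API_URL = "http://169.233.7.2/infer"  # endpoint from the commit above

def query_llava(image_path, text, conv_mode="vicuna_v1"):
    """Post an image plus prompt fields to the inference endpoint as multipart form data."""
    with open(image_path, "rb") as f:                  # image_path as returned by gr.Image(type="filepath")
        files = {"image": f}                           # file part; field name mirrors the committed client
        data = {"text": text, "conv_mode": conv_mode}  # form fields instead of json=, so they are not dropped
        response = requests.post(API_URL, files=files, data=data, timeout=120)
    response.raise_for_status()
    return response.json().get("response", "Error processing the image.")

If the server instead expects a JSON body, the image would have to be base64-encoded, as in the removed version of app.py.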