LLaMA_Server / app.py
import gradio as gr
import requests
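# Minimal Gradio front end for a locally hosted Qwen2.5-0.5B model.
# Assumption (not stated in this file): a llama.cpp-style completion server is
# already running on localhost:8000 and exposes the /completion endpoint used
# below, returning JSON that contains a "text" field.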
def chat_with_qwen(prompt):
    # Forward the prompt to the local completion endpoint and return the generated text.
    response = requests.post(
        "http://localhost:8000/completion",
        json={"prompt": prompt, "max_tokens": 100},
    )
    return response.json().get("text", "No response from the server.")
iface = gr.Interface(
    fn=chat_with_qwen,
    inputs="text",
    outputs="text",
    title="Qwen2.5-0.5B Chatbot on Hugging Face Space",
)
iface.launch(server_name="0.0.0.0", server_port=7860)