|
import gradio as gr |
|
from huggingface_hub import InferenceClient |
|
import os |
|
import uuid |
|
import modelscope_studio.components.antd as antd |
|
import modelscope_studio.components.antdx as antdx |
|
import modelscope_studio.components.base as ms |
|
|
|
|
|
# Shared hosted-inference endpoint used for all chat completions below.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


# Absolute path to the logo image next to this file. Not referenced in this
# chunk — presumably wired into the UI elsewhere (TODO confirm).
onehealth_logo = os.path.join(os.path.dirname(__file__), "one-health.png")
|
|
|
|
|
def respond(
    message,
    history: list[tuple[str, str]],
    system_message="",
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    """Stream a chat completion for *message* given the prior *history*.

    Generator used as the ``gr.ChatInterface`` callback: each ``yield`` is the
    full response accumulated so far, which Gradio renders incrementally.

    Args:
        message: The user's latest message.
        history: Prior turns as ``(user, assistant)`` pairs; empty entries are
            skipped.
        system_message: Accepted for interface compatibility but currently
            ignored — the persona below is hard-coded.  NOTE(review): confirm
            this is intentional before exposing the field to users.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        str: The partial assistant response, growing with each streamed token.
    """
    messages = [{"role": "system", "content": "Your name is Dr. Kiki. You are a caring and conversational Medical Health Consultant. You always start by asking the person’s name and how they are feeling. You engage like a real doctor, not an AI, making the conversation warm and human. Your goal is to understand their concerns by asking clarifying questions and offering thoughtful, reassuring guidance without causing unnecessary worry. You do not diagnose or prescribe medication, but you provide insights on symptoms, risk factors, and next steps while encouraging professional medical care when necessary."}]

    # Replay prior turns, skipping empty halves (e.g. an unanswered last turn).
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # `chunk` (not `message`) — the original shadowed the function parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # Streamed deltas can carry no content (e.g. the role-only first
        # chunk); concatenating None would raise TypeError.
        if token:
            response += token
        yield response
|
|
|
|
|
""" |
|
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface |
|
""" |
|
demo = gr.ChatInterface( |
|
respond, |
|
|
|
) |
|
|
|
|
|
if __name__ == "__main__": |
|
demo.launch() |
|
|