import os
import uuid

import gradio as gr
import modelscope_studio.components.antd as antd
import modelscope_studio.components.antdx as antdx
import modelscope_studio.components.base as ms
from openai import OpenAI
# Qwen/QwQ-32B

# =========== Configuration
# API KEY: read from the DASHSCOPE_API_KEY environment variable
client = OpenAI(
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.getenv("DASHSCOPE_API_KEY"),
)
model = "qwq-32b"
save_history = True
# =========== Configuration

is_modelscope_studio = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
def get_text(text: str, cn_text: str):
    # localize UI strings: Chinese inside ModelScope Studio, English elsewhere
    if is_modelscope_studio:
        return cn_text
    return text


qwen_logo = os.path.join(os.path.dirname(__file__), "qwen.png")
DEFAULT_PROMPTS = [{
    "category": "🖋 Make a plan",
    "prompts": [
        "Help me with a plan to start a business",
        "Help me with a plan to achieve my goals",
        "Help me with a plan for a successful interview"
    ]
}, {
    "category": "📅 Help me write",
    "prompts": [
        "Help me write a story with a twist ending",
        "Help me write a blog post on mental health",
        "Help me write a letter to my future self"
    ]
}]
DEFAULT_SUGGESTIONS = [{
    "label": "Make a plan",
    "value": "Make a plan",
    "children": [{
        "label": "Start a business",
        "value": "Help me with a plan to start a business"
    }, {
        "label": "Achieve my goals",
        "value": "Help me with a plan to achieve my goals"
    }, {
        "label": "Successful interview",
        "value": "Help me with a plan for a successful interview"
    }]
}, {
    "label": "Help me write",
    "value": "Help me write",
    "children": [{
        "label": "Story with a twist ending",
        "value": "Help me write a story with a twist ending"
    }, {
        "label": "Blog post on mental health",
        "value": "Help me write a blog post on mental health"
    }, {
        "label": "Letter to my future self",
        "value": "Help me write a letter to my future self"
    }]
}]
DEFAULT_CONVERSATIONS_HISTORY = [{"role": "placeholder"}]

DEFAULT_LOCALE = 'zh_CN' if is_modelscope_studio else 'en_US'

DEFAULT_THEME = {
    "token": {
        "colorPrimary": "#6A57FF",
    }
}
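

# Convert the stored conversation (a list of {"role", "content", ...} dicts) into the
# OpenAI-style message list sent to the model, prepending the system prompt.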
def format_history(history):
    messages = [{
        "role": "system",
        "content": """Communicate technical concepts with an expansive, rigorous, and analytical approach that emphasizes comprehensive critical thinking, evidence-based reasoning, and forward-looking innovation. Maintain a deeply skeptical yet constructive perspective that prioritizes nuanced clarity, systemic efficiency, and adaptive methodological frameworks. When writing code comments:
Never capitalize the first letter of comments
Use creative visual markers like --> or <-- only for inline comments
Write comments in present tense
Focus on comprehensive yet concise, precise explanatory language
Prioritize multi-layered clarity and immediate understanding
Reflect the systematic, innovative, and intellectually rigorous approach demonstrated in advanced technical documentation
Always include line numbers when referencing programming code
Provide contextual insights that extend beyond immediate code functionality
Integrate broader architectural and design philosophy considerations
Develop intricate meta-analytical frameworks that contextualize technical implementations within broader technological ecosystems
Explore potential long-term evolutionary trajectories and potential systemic implications of current technological approaches
Create multi-dimensional annotation strategies that capture not just functional descriptions, but philosophical and strategic underpinnings
Ensure code comments are:
• Deeply informative without being verbose
• Present-focused and strategically action-oriented
• Visually engaging with sophisticated use of symbolic markers
• Aligned with advanced principles of technical precision and critical systemic analysis
• Explicitly reference line numbers for enhanced code context and comprehensive understanding
• Demonstrate intellectual curiosity and methodological transparency
• Connect immediate implementation with broader theoretical underpinnings as well as the broader technological ecosystem that stems from/is built upon that given theoretical fundamental framework.
• Provide layered insights that transcend immediate functional requirements
• Establish a comprehensive narrative around technological design and implementation
CRITICAL REQUIREMENT: For every technical term used, provide an immediately adjacent, clearly marked DEFINITION that is comprehensive, accessible, and precise.
[DEFINITION FORMAT: {Term}: A fully elaborated explanation that captures the term's technical essence, practical application, and broader contextual significance.]
Maintain an approach that balances technical depth with intellectual accessibility, always seeking to illuminate complex systems through clear, structured, and innovative communication strategies, while continuously expanding the boundaries of technological understanding and meta-analytical discourse.
Technical Methodology: Evaluating Machine Learning Model Robustness
Core Approach:
Systematically challenge existing assumptions
Develop multi-dimensional assessment frameworks
Prioritize empirical validation over theoretical speculation
Key Evaluation Criteria:
Performance consistency across varied datasets
Computational efficiency
Adaptability to emerging technological constraints
Transparent decision-making processes
Methodological Principles:
• Maintain rational skepticism towards established methods
• Continuously test and refine analytical frameworks
• Balance innovative thinking with practical implementation
• Communicate complex ideas with precision and accessibility
Technical Definition: "Robust machine learning models demonstrate consistent performance across diverse input distributions, maintaining predictive accuracy and computational efficiency while adapting to evolving technological landscapes."
""",
    }]
    for item in history:
        if item["role"] == "user":
            messages.append({"role": "user", "content": item["content"]})
        elif item["role"] == "assistant":
            messages.append({"role": "assistant", "content": item["content"]})
    return messages
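

# All chat logic lives in this class. Each method is wired to a Gradio/modelscope_studio
# event below and returns (or yields) a dict mapping UI components to gr.update(...) patches.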
class Gradio_Events: | |
def _submit(state_value): | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
# submit | |
history_messages = format_history(history) | |
history.append({ | |
"role": "assistant", | |
"content": "", | |
"key": str(uuid.uuid4()), | |
"meta": { | |
"reason_content": "" | |
}, | |
"loading": True, | |
}) | |
yield { | |
chatbot: gr.update(items=history), | |
state: gr.update(value=state_value), | |
} | |
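        # stream the completion: the DashScope OpenAI-compatible endpoint is expected to emit
        # the model's reasoning in delta.reasoning_content before the answer in delta.content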
try: | |
response = client.chat.completions.create( | |
model=model, # ModelScope Model-Id | |
messages=history_messages, | |
stream=True) | |
thought_done = False | |
for chunk in response: | |
reasoning_content = chunk.choices[0].delta.reasoning_content | |
content = chunk.choices[0].delta.content | |
history[-1]["loading"] = False | |
if content and not thought_done: | |
thought_done = True | |
history[-1]["meta"]["reason_content"] = history[-1][ | |
"content"] | |
print("Reason: ",history[-1]["meta"]["reason_content"]) | |
history[-1]["content"] = "" | |
history[-1]["meta"]["thought_end_message"] = get_text( | |
"End of Thought", "已深度思考") | |
if not thought_done: | |
history[-1]["content"] += reasoning_content or "" | |
else: | |
history[-1]["content"] += content or "" | |
yield { | |
chatbot: gr.update(items=history), | |
state: gr.update(value=state_value) | |
} | |
history[-1]["meta"]["end"] = True | |
print("Answer: ",history[-1]["content"]) | |
yield { | |
chatbot: gr.update(items=history), | |
state: gr.update(value=state_value), | |
} | |
except Exception as e: | |
history[-1]["loading"] = False | |
history[-1]["meta"]["end"] = True | |
history[-1]["meta"]["error"] = True | |
history[-1]["content"] = "Failed to respond, please try again." | |
yield { | |
chatbot: gr.update(items=history), | |
state: gr.update(value=state_value) | |
} | |
print('Error: ',e) | |
raise e | |
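
    # handle a new user message: create a conversation on first use, append the user turn,
    # lock the UI, stream the assistant reply, then unlock the UI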
def submit(sender_value, state_value): | |
if not state_value["conversation_id"]: | |
random_id = str(uuid.uuid4()) | |
history = [] | |
state_value["conversation_id"] = random_id | |
state_value["conversations_history"][random_id] = history | |
state_value["conversations"].append({ | |
"label": sender_value, | |
"key": random_id | |
}) | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
history.append({ | |
"role": "user", | |
"meta": {}, | |
"key": str(uuid.uuid4()), | |
"content": sender_value | |
}) | |
# preprocess submit | |
yield Gradio_Events.preprocess_submit()(state_value) | |
try: | |
for chunk in Gradio_Events._submit(state_value): | |
yield chunk | |
except Exception as e: | |
raise e | |
finally: | |
# postprocess submit | |
yield Gradio_Events.postprocess_submit(state_value) | |
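
    # regenerate an assistant reply: truncate history at the selected message
    # and resubmit the remaining turns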
def regenerate_message(state_value, e: gr.EventData): | |
conversation_key = e._data["component"]["conversationKey"] | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
index = -1 | |
for i, conversation in enumerate(history): | |
if conversation["key"] == conversation_key: | |
index = i | |
break | |
        if index == -1:
            # message key not found: nothing to regenerate
            yield gr.skip()
            return
history = history[:index] | |
state_value["conversations_history"][ | |
state_value["conversation_id"]] = history | |
yield { | |
chatbot:gr.update(items=history), | |
state: gr.update(value=state_value) | |
} | |
# preprocess submit | |
yield Gradio_Events.preprocess_submit(clear_input=False)(state_value) | |
try: | |
for chunk in Gradio_Events._submit(state_value): | |
yield chunk | |
except Exception as e: | |
raise e | |
finally: | |
# postprocess submit | |
yield Gradio_Events.postprocess_submit(state_value) | |
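
    # disable message actions, conversation switching and the send box while a reply streams;
    # clear_input=False (used for regeneration) keeps the current input text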
def preprocess_submit(clear_input=True): | |
def preprocess_submit_handler(state_value): | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
for conversation in history: | |
if "meta" in conversation: | |
conversation["meta"]["disabled"] = True | |
return { | |
sender: gr.update(value=None, loading=True) if clear_input else gr.update(loading=True), | |
conversations: | |
gr.update(active_key=state_value["conversation_id"], | |
items=list( | |
map( | |
lambda item: { | |
**item, | |
"disabled": | |
True if item["key"] != state_value[ | |
"conversation_id"] else False, | |
}, state_value["conversations"]))), | |
add_conversation_btn: | |
gr.update(disabled=True), | |
clear_btn: | |
gr.update(disabled=True), | |
conversation_delete_menu_item: | |
gr.update(disabled=True), | |
chatbot: | |
gr.update(items=history), | |
state: | |
gr.update(value=state_value), | |
} | |
return preprocess_submit_handler | |
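
    # re-enable everything that preprocess_submit disabled once streaming finishes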
def postprocess_submit(state_value): | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
for conversation in history: | |
if "meta" in conversation: | |
conversation["meta"]["disabled"] = False | |
return { | |
sender: gr.update(loading=False), | |
conversation_delete_menu_item: gr.update(disabled=False), | |
clear_btn: gr.update(disabled=False), | |
conversations: gr.update(items=state_value["conversations"]), | |
add_conversation_btn: gr.update(disabled=False), | |
chatbot: gr.update(items=history), | |
state: gr.update(value=state_value), | |
} | |
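
    # stop generation: mark the in-flight assistant message as canceled and unlock the UI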
def cancel(state_value): | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
history[-1]["loading"] = False | |
history[-1]["meta"]["end"] = True | |
history[-1]["meta"]["canceled"] = True | |
return Gradio_Events.postprocess_submit(state_value) | |
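
    # remove a single message from the current conversation by its key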
def delete_message(state_value, e: gr.EventData): | |
conversation_key = e._data["component"]["conversationKey"] | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
history = [item for item in history if item["key"] != conversation_key] | |
state_value["conversations_history"][ | |
state_value["conversation_id"]] = history | |
return gr.update(items=history if len(history) > | |
0 else DEFAULT_CONVERSATIONS_HISTORY), gr.update( | |
value=state_value) | |
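
    # load the selected message's text into the edit textarea
    # (the edit modal itself is opened by the chained open_modal call)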
def edit_message(state_value, e: gr.EventData): | |
conversation_key = e._data["component"]["conversationKey"] | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
index = -1 | |
for i, conversation in enumerate(history): | |
if conversation["key"] == conversation_key: | |
index = i | |
break | |
if index == -1: | |
return gr.skip() | |
state_value["editing_message_index"] = index | |
text = '' | |
if isinstance(history[index]["content"], str): | |
text = history[index]["content"] | |
else: | |
text = history[index]["content"]["text"] | |
return gr.update(value=text), gr.update(value=state_value) | |
def confirm_edit_message(edit_textarea_value, state_value): | |
history = state_value["conversations_history"][ | |
state_value["conversation_id"]] | |
message = history[state_value["editing_message_index"]] | |
if isinstance(message["content"], str): | |
message["content"] = edit_textarea_value | |
else: | |
message["content"]["text"] = edit_textarea_value | |
return gr.update(items=history), gr.update(value=state_value) | |
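
    # replace the trailing "/" trigger in the sender with the suggestion the user picked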
def select_suggestion(sender_value, e: gr.EventData): | |
return gr.update(value=sender_value[:-1] + e._data["payload"][0]) | |
def apply_prompt(e: gr.EventData): | |
return gr.update(value=e._data["payload"][0]["data"]["description"]) | |
def new_chat(state_value): | |
if not state_value["conversation_id"]: | |
return gr.skip() | |
state_value["conversation_id"] = "" | |
return gr.update(active_key=state_value["conversation_id"]), gr.update( | |
items=DEFAULT_CONVERSATIONS_HISTORY), gr.update(value=state_value) | |
def select_conversation(state_value, e: gr.EventData): | |
active_key = e._data["payload"][0] | |
if state_value["conversation_id"] == active_key or ( | |
active_key not in state_value["conversations_history"]): | |
return gr.skip() | |
state_value["conversation_id"] = active_key | |
return gr.update(active_key=active_key), gr.update( | |
items=state_value["conversations_history"][active_key]), gr.update( | |
value=state_value) | |
def click_conversation_menu(state_value, e: gr.EventData): | |
conversation_id = e._data["payload"][0]["key"] | |
operation = e._data["payload"][1]["key"] | |
if operation == "delete": | |
del state_value["conversations_history"][conversation_id] | |
state_value["conversations"] = [ | |
item for item in state_value["conversations"] | |
if item["key"] != conversation_id | |
] | |
if state_value["conversation_id"] == conversation_id: | |
state_value["conversation_id"] = "" | |
return gr.update( | |
items=state_value["conversations"], | |
active_key=state_value["conversation_id"]), gr.update( | |
items=DEFAULT_CONVERSATIONS_HISTORY), gr.update( | |
value=state_value) | |
else: | |
return gr.update( | |
items=state_value["conversations"]), gr.skip(), gr.update( | |
value=state_value) | |
return gr.skip() | |
def clear_conversation_history(state_value): | |
if not state_value["conversation_id"]: | |
return gr.skip() | |
state_value["conversations_history"][ | |
state_value["conversation_id"]] = [] | |
return gr.update(items=DEFAULT_CONVERSATIONS_HISTORY), gr.update( | |
value=state_value) | |
def close_modal(): | |
return gr.update(open=False) | |
def open_modal(): | |
return gr.update(open=True) | |
def update_browser_state(state_value): | |
return gr.update(value=dict( | |
conversations=state_value["conversations"], | |
conversations_history=state_value["conversations_history"])) | |
def apply_browser_state(browser_state_value, state_value): | |
state_value["conversations"] = browser_state_value["conversations"] | |
state_value["conversations_history"] = browser_state_value[ | |
"conversations_history"] | |
return gr.update( | |
items=browser_state_value["conversations"]), gr.update( | |
value=state_value) | |
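

# Layout CSS: pin the chat area to the full viewport height and reveal per-message
# footer actions on hover or for the latest message.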
css = """ | |
.gradio-container { | |
padding: 0 !important; | |
} | |
.gradio-container > main.fillable { | |
padding: 0 !important; | |
} | |
#chatbot { | |
height: calc(100vh - 21px - 16px); | |
} | |
#chatbot .chatbot-conversations { | |
height: 100%; | |
background-color: var(--ms-gr-ant-color-bg-layout); | |
} | |
#chatbot .chatbot-conversations .chatbot-conversations-list { | |
padding-left: 0; | |
padding-right: 0; | |
} | |
#chatbot .chatbot-chat { | |
padding: 32px; | |
height: 100%; | |
} | |
@media (max-width: 768px) { | |
#chatbot .chatbot-chat { | |
padding: 0; | |
} | |
} | |
#chatbot .chatbot-chat .chatbot-chat-messages { | |
flex: 1; | |
} | |
#chatbot .chatbot-chat .chatbot-chat-messages .chatbot-chat-message .chatbot-chat-message-footer { | |
visibility: hidden; | |
opacity: 0; | |
transition: opacity 0.2s; | |
} | |
#chatbot .chatbot-chat .chatbot-chat-messages .chatbot-chat-message:last-child .chatbot-chat-message-footer { | |
visibility: visible; | |
opacity: 1; | |
} | |
#chatbot .chatbot-chat .chatbot-chat-messages .chatbot-chat-message:hover .chatbot-chat-message-footer { | |
visibility: visible; | |
opacity: 1; | |
} | |
""" | |


def logo():
    with antd.Typography.Title(level=1,
                               elem_style=dict(fontSize=24, padding=8,
                                               margin=0)):
        with antd.Flex(align="center", gap="small", justify="center"):
            antd.Image(qwen_logo,
                       preview=False,
                       alt="logo",
                       width=24,
                       height=24)
            ms.Span("QwQ-32B")
with gr.Blocks(css=css, fill_width=True) as demo: | |
state = gr.State({ | |
"conversations_history": {}, | |
"conversations": [], | |
"conversation_id": "", | |
"editing_message_index": -1, | |
}) | |
with ms.Application(), antdx.XProvider( | |
theme=DEFAULT_THEME, locale=DEFAULT_LOCALE), ms.AutoLoading(): | |
with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"): | |
# Left Column | |
with antd.Col(md=dict(flex="0 0 260px", span=24, order=0), | |
span=0, | |
order=1, | |
elem_classes="chatbot-conversations"): | |
with antd.Flex(vertical=True, | |
gap="small", | |
elem_style=dict(height="100%")): | |
# Logo | |
logo() | |
# New Conversation Button | |
with antd.Button(value=None, | |
color="primary", | |
variant="filled", | |
block=True) as add_conversation_btn: | |
ms.Text(get_text("New Conversation", "新建对话")) | |
with ms.Slot("icon"): | |
antd.Icon("PlusOutlined") | |
# Conversations List | |
with antdx.Conversations( | |
elem_classes="chatbot-conversations-list", | |
) as conversations: | |
with ms.Slot('menu.items'): | |
with antd.Menu.Item( | |
label="Delete", key="delete", danger=True | |
) as conversation_delete_menu_item: | |
with ms.Slot("icon"): | |
antd.Icon("DeleteOutlined") | |
# Right Column | |
with antd.Col(flex=1, elem_style=dict(height="100%")): | |
with antd.Flex(vertical=True, | |
gap="middle", | |
elem_classes="chatbot-chat"): | |
# Chatbot | |
with antdx.Bubble.List( | |
items=DEFAULT_CONVERSATIONS_HISTORY, | |
elem_classes="chatbot-chat-messages") as chatbot: | |
# Define Chatbot Roles | |
with ms.Slot("roles"): | |
# Placeholder Role | |
with antdx.Bubble.List.Role( | |
role="placeholder", | |
styles=dict(content=dict(width="100%")), | |
variant="borderless"): | |
with ms.Slot("messageRender"): | |
with antd.Space( | |
direction="vertical", | |
size=16, | |
elem_style=dict(width="100%")): | |
with antdx.Welcome( | |
styles=dict(icon=dict( | |
flexShrink=0)), | |
variant="borderless", | |
title=get_text( | |
"Hello, I'm QwQ-32B", | |
"你好,我是 QwQ-32B"), | |
description=get_text( | |
"You can type text to get started.", | |
"你可以输入文本开始对话。"), | |
): | |
with ms.Slot("icon"): | |
antd.Image(qwen_logo, | |
preview=False) | |
with antdx.Prompts(title=get_text( | |
"How can I help you today?", | |
"有什么我能帮助你的吗?"), | |
styles={ | |
"list": { | |
"width": | |
'100%', | |
}, | |
"item": { | |
"flex": 1, | |
}, | |
}) as prompts: | |
for item in DEFAULT_PROMPTS: | |
with antdx.Prompts.Item( | |
label=item["category"] | |
): | |
for prompt in item[ | |
"prompts"]: | |
antdx.Prompts.Item( | |
description=prompt, | |
) | |
# User Role | |
with antdx.Bubble.List.Role( | |
role="user", | |
placement="end", | |
elem_classes="chatbot-chat-message", | |
class_names=dict( | |
footer="chatbot-chat-message-footer"), | |
styles=dict(content=dict( | |
maxWidth="100%", | |
overflow='auto', | |
))): | |
with ms.Slot( | |
"messageRender", | |
params_mapping="(content) => content"): | |
ms.Markdown() | |
with ms.Slot("footer", | |
params_mapping="""(bubble) => { | |
return { | |
copy_btn: { | |
copyable: { text: typeof bubble.content === 'string' ? bubble.content : bubble.content?.text, tooltips: false }, | |
}, | |
edit_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled }, | |
delete_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled }, | |
}; | |
}"""): | |
with antd.Typography.Text( | |
copyable=dict(tooltips=False), | |
as_item="copy_btn"): | |
with ms.Slot("copyable.icon"): | |
with antd.Button(value=None, | |
size="small", | |
color="default", | |
variant="text"): | |
with ms.Slot("icon"): | |
antd.Icon("CopyOutlined") | |
with antd.Button(value=None, | |
size="small", | |
color="default", | |
variant="text"): | |
with ms.Slot("icon"): | |
antd.Icon("CheckOutlined") | |
with antd.Button(value=None, | |
size="small", | |
color="default", | |
variant="text", | |
as_item="edit_btn" | |
) as user_edit_btn: | |
with ms.Slot("icon"): | |
antd.Icon("EditOutlined") | |
with antd.Popconfirm( | |
title="Delete the message", | |
description= | |
"Are you sure to delete this message?", | |
ok_button_props=dict(danger=True), | |
as_item="delete_btn" | |
) as user_delete_popconfirm: | |
with antd.Button(value=None, | |
size="small", | |
color="default", | |
variant="text", | |
as_item="delete_btn"): | |
with ms.Slot("icon"): | |
antd.Icon("DeleteOutlined") | |
# Chatbot Role | |
with antdx.Bubble.List.Role( | |
role="assistant", | |
placement="start", | |
elem_classes="chatbot-chat-message", | |
class_names=dict( | |
footer="chatbot-chat-message-footer"), | |
styles=dict(content=dict( | |
maxWidth="100%", overflow='auto'))): | |
with ms.Slot("avatar"): | |
antd.Avatar( | |
os.path.join(os.path.dirname(__file__), | |
"qwen.png")) | |
with ms.Slot( | |
"messageRender", | |
params_mapping="""(content, bubble) => { | |
const reason_content = bubble?.meta?.reason_content | |
const has_error = bubble?.meta?.error | |
return { | |
reasoning: reason_content || content, | |
reasoning_container: has_error ? { style: { display: 'none' } } : undefined, | |
answer: { | |
value: reason_content || has_error ? content : undefined | |
}, | |
collapse_label: bubble.meta?.thought_end_message, | |
collapse_progress: bubble.meta?.thought_end_message ? { style: { display: 'none' } } : undefined, | |
canceled: bubble.meta?.canceled ? undefined : { style: { display: 'none' } } | |
} | |
}"""): | |
with antd.Flex(vertical=True, | |
gap="middle"): | |
with antd.Collapse( | |
default_active_key=[ | |
"reasoning" | |
], | |
as_item="reasoning_container"): | |
with antd.Collapse.Item( | |
key="reasoning"): | |
with ms.Slot("label"): | |
with antd.Space( | |
size="middle"): | |
ms.Span( | |
get_text( | |
"Thinking...", | |
"思考中..."), | |
as_item= | |
"collapse_label") | |
antd.Progress( | |
percent="100", | |
status="active", | |
elem_style=dict( | |
display="flex", | |
alignItems= | |
"center", | |
), | |
show_info=False, | |
size=[110, 5], | |
as_item= | |
"collapse_progress" | |
) | |
with antd.Alert( | |
type="warning"): | |
with ms.Slot( | |
"description"): | |
ms.Markdown( | |
as_item="reasoning" | |
) | |
ms.Markdown( | |
as_item="answer", | |
elem_classes="answer-content") | |
antd.Divider(as_item="canceled") | |
antd.Typography.Text(get_text( | |
"Chat completion paused.", "聊天已暂停。"), | |
as_item="canceled", | |
type="warning") | |
with ms.Slot("footer", | |
params_mapping="""(bubble) => { | |
if (bubble?.meta?.end) { | |
return { | |
copy_btn: { | |
copyable: { text: bubble.content, tooltips: false }, | |
}, | |
regenerate_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled }, | |
delete_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled }, | |
edit_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled }, | |
}; | |
} | |
return { actions_container: { style: { display: 'none' } } }; | |
}"""): | |
with ms.Div(as_item="actions_container"): | |
with antd.Typography.Text( | |
copyable=dict(tooltips=False), | |
as_item="copy_btn"): | |
with ms.Slot("copyable.icon"): | |
with antd.Button( | |
value=None, | |
size="small", | |
color="default", | |
variant="text"): | |
with ms.Slot("icon"): | |
antd.Icon( | |
"CopyOutlined") | |
with antd.Button( | |
value=None, | |
size="small", | |
color="default", | |
variant="text"): | |
with ms.Slot("icon"): | |
antd.Icon( | |
"CheckOutlined") | |
with antd.Popconfirm( | |
title=get_text( | |
"Regenerate the message", | |
"重新生成消息"), | |
description=get_text( | |
"Regenerate the message will also delete all subsequent messages.", | |
"重新生成消息将会删除所有的后续消息。"), | |
ok_button_props=dict( | |
danger=True), | |
as_item="regenerate_btn" | |
) as chatbot_regenerate_popconfirm: | |
with antd.Button( | |
value=None, | |
size="small", | |
color="default", | |
variant="text", | |
as_item="regenerate_btn", | |
): | |
with ms.Slot("icon"): | |
antd.Icon("SyncOutlined") | |
with antd.Button(value=None, | |
size="small", | |
color="default", | |
variant="text", | |
as_item="edit_btn" | |
) as chatbot_edit_btn: | |
with ms.Slot("icon"): | |
antd.Icon("EditOutlined") | |
with antd.Popconfirm( | |
title=get_text("Delete the message", "删除消息"), | |
description=get_text( | |
"Are you sure to delete this message?", | |
"确定要删除这条消息吗?"), | |
ok_button_props=dict( | |
danger=True), | |
as_item="delete_btn" | |
) as chatbot_delete_popconfirm: | |
with antd.Button( | |
value=None, | |
size="small", | |
color="default", | |
variant="text", | |
as_item="delete_btn"): | |
with ms.Slot("icon"): | |
antd.Icon("DeleteOutlined") | |
# Sender | |
with antdx.Suggestion( | |
items=DEFAULT_SUGGESTIONS, | |
# onKeyDown Handler in Javascript | |
should_trigger="""(e, { onTrigger, onKeyDown }) => { | |
switch(e.key) { | |
case '/': | |
onTrigger() | |
break | |
case 'ArrowRight': | |
case 'ArrowLeft': | |
case 'ArrowUp': | |
case 'ArrowDown': | |
break; | |
default: | |
onTrigger(false) | |
} | |
onKeyDown(e) | |
}""") as suggestion: | |
with ms.Slot("children"): | |
with antdx.Sender(placeholder=get_text( | |
"Enter / to get suggestions", | |
"输入 / 获取建议"), ) as sender: | |
with ms.Slot("prefix"): | |
# Clear Button | |
with antd.Tooltip(title=get_text( | |
"Clear Conversation History", | |
"清空对话历史"), ): | |
with antd.Button( | |
value=None, | |
type="text") as clear_btn: | |
with ms.Slot("icon"): | |
antd.Icon("ClearOutlined") | |
# Modals | |
with antd.Modal(title=get_text("Edit Message", "编辑消息"), | |
open=False, | |
centered=True, | |
width="60%") as edit_modal: | |
edit_textarea = antd.Input.Textarea(auto_size=dict(minRows=2, | |
maxRows=6), | |
elem_style=dict(width="100%")) | |
# Events Handler | |
if save_history: | |
browser_state = gr.BrowserState( | |
{ | |
"conversations_history": {}, | |
"conversations": [], | |
}, | |
storage_key="qwen_qwq_chatbot_storage") | |
state.change(fn=Gradio_Events.update_browser_state, | |
inputs=[state], | |
outputs=[browser_state]) | |
demo.load(fn=Gradio_Events.apply_browser_state, | |
inputs=[browser_state, state], | |
outputs=[conversations, state]) | |
add_conversation_btn.click(fn=Gradio_Events.new_chat, | |
inputs=[state], | |
outputs=[conversations, chatbot, state]) | |
conversations.active_change(fn=Gradio_Events.select_conversation, | |
inputs=[state], | |
outputs=[conversations, chatbot, state]) | |
conversations.menu_click(fn=Gradio_Events.click_conversation_menu, | |
inputs=[state], | |
outputs=[conversations, chatbot, state]) | |
prompts.item_click(fn=Gradio_Events.apply_prompt, outputs=[sender]) | |
clear_btn.click(fn=Gradio_Events.clear_conversation_history, | |
inputs=[state], | |
outputs=[chatbot, state]) | |
suggestion.select(fn=Gradio_Events.select_suggestion, | |
inputs=[sender], | |
outputs=[sender]) | |
gr.on(triggers=[user_edit_btn.click, chatbot_edit_btn.click], | |
fn=Gradio_Events.edit_message, | |
inputs=[state], | |
outputs=[edit_textarea, state]).then(fn=Gradio_Events.open_modal, | |
outputs=[edit_modal]) | |
edit_modal.ok(fn=Gradio_Events.confirm_edit_message, | |
inputs=[edit_textarea, state], | |
outputs=[chatbot, state]).then(fn=Gradio_Events.close_modal, | |
outputs=[edit_modal]) | |
edit_modal.cancel(fn=Gradio_Events.close_modal, outputs=[edit_modal]) | |
gr.on(triggers=[ | |
chatbot_delete_popconfirm.confirm, user_delete_popconfirm.confirm | |
], | |
fn=Gradio_Events.delete_message, | |
inputs=[state], | |
outputs=[chatbot, state]) | |
regenerating_event = chatbot_regenerate_popconfirm.confirm( | |
fn=Gradio_Events.regenerate_message, | |
inputs=[state], | |
outputs=[sender, clear_btn, conversation_delete_menu_item, add_conversation_btn, conversations, chatbot, state]) | |
submit_event = sender.submit(fn=Gradio_Events.submit, | |
inputs=[sender, state], | |
outputs=[sender, clear_btn, conversation_delete_menu_item, | |
add_conversation_btn, conversations,chatbot, state]) | |
sender.cancel(fn=None, cancels=[submit_event, regenerating_event]) | |
sender.cancel(fn=Gradio_Events.cancel, | |
inputs=[state], | |
outputs=[ | |
sender, conversation_delete_menu_item, clear_btn, | |
conversations, add_conversation_btn, chatbot, state | |
]) | |
if __name__ == "__main__": | |
demo.queue(default_concurrency_limit=200).launch(ssr_mode=False, max_threads=200) | |