Update app.py
app.py CHANGED

@@ -2,10 +2,10 @@ import os
 import sys
 import random
 import gradio as gr
-from multiprocessing import freeze_support
+from multiprocessing import freeze_support

-#
-sys.path.
+# Fix path to include src directory
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"))

 from txagent import TxAgent

@@ -63,8 +63,7 @@ question_examples = [
     ["A 30-year-old patient is on Prozac for depression and now diagnosed with WHIM syndrome. Is Xolremdi suitable?"]
 ]

-
-def create_ui():
+def create_ui(agent):
     with gr.Blocks(css=css) as demo:
         gr.Markdown(DESCRIPTION)
         gr.Markdown(INTRO)

@@ -84,9 +83,9 @@ def create_ui():
             show_copy_button=True
         )

-        # === Chat handler (streaming)
+        # === Chat handler (streaming) ===
         async def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
-
+            response = await agent.run_gradio_chat(
                 message=message,
                 history=history,
                 temperature=temperature,

@@ -96,8 +95,9 @@ def create_ui():
                 conversation=conversation,
                 max_round=max_round
             )
+            return response

-        # === Retry handler
+        # === Retry handler ===
         async def handle_retry(history, retry_data, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
             agent.update_parameters(seed=random.randint(0, 10000))
             new_history = history[:retry_data.index]

@@ -113,13 +113,14 @@ def create_ui():
                 max_round=max_round
             )

+        # Configure retry button
         chatbot.retry(
             handle_retry,
             inputs=[chatbot, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
             outputs=chatbot
         )

-        # === Chat Interface setup
+        # === Chat Interface setup ===
         gr.ChatInterface(
             fn=handle_chat,
             chatbot=chatbot,

@@ -139,21 +140,30 @@ def create_ui():
     return demo

 if __name__ == "__main__":
-    freeze_support()
-
-    # Initialize agent
-    agent = TxAgent(
-        model_name,
-        rag_model_name,
-        tool_files_dict=new_tool_files,
-        force_finish=True,
-        enable_checker=True,
-        step_rag_num=10,
-        seed=100,
-        additional_default_tools=["DirectResponse", "RequireClarification"]
-    )
-    agent.init_model()
+    freeze_support()

-
-
-
+    try:
+        # Initialize agent
+        agent = TxAgent(
+            model_name,
+            rag_model_name,
+            tool_files_dict=new_tool_files,
+            force_finish=True,
+            enable_checker=True,
+            step_rag_num=10,
+            seed=100,
+            additional_default_tools=["DirectResponse", "RequireClarification"]
+        )
+        agent.init_model()
+
+        # Verify the agent has the required method
+        if not hasattr(agent, 'run_gradio_chat'):
+            raise AttributeError("The TxAgent instance is missing the run_gradio_chat method!")
+
+        # Create and launch UI
+        demo = create_ui(agent)
+        demo.launch(show_error=True)
+
+    except Exception as e:
+        print(f"Application failed to start: {str(e)}")
+        raise
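The structural change in this commit is that create_ui now receives the agent as a parameter and handle_chat awaits agent.run_gradio_chat(...), instead of the UI relying on a half-wired module-level global. A minimal, self-contained sketch of that wiring follows; the StubAgent class and its echo reply are illustrative assumptions standing in for the real TxAgent, which needs model weights and the tool files configured in app.py:

import gradio as gr


class StubAgent:
    # Stand-in for TxAgent; it mimics only the one method the UI calls.
    async def run_gradio_chat(self, message, history, **kwargs):
        # The real agent would run its tool-calling loop here.
        return f"Echo: {message}"


def create_ui(agent):
    # The agent is passed in explicitly, as in the commit, rather than read
    # from a global created under `if __name__ == "__main__":`.
    async def handle_chat(message, history):
        return await agent.run_gradio_chat(message=message, history=history)

    return gr.ChatInterface(fn=handle_chat)


if __name__ == "__main__":
    create_ui(StubAgent()).launch(show_error=True)

Passing the agent into create_ui keeps model construction (which can fail, hence the new try/except) separate from UI definition, and it is what makes the hasattr(agent, 'run_gradio_chat') guard meaningful before the interface is built.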