Ali2206 commited on
Commit
ecaf6bd
·
verified ·
1 Parent(s): 338b5ef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -55
app.py CHANGED
@@ -5,23 +5,15 @@ from multiprocessing import freeze_support
5
  import importlib
6
  import inspect
7
 
8
- # === Fix path to include src/txagent
9
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
10
-
11
- # === Import and reload to ensure correct file
12
  import txagent.txagent
13
  importlib.reload(txagent.txagent)
14
  from txagent.txagent import TxAgent
15
 
16
- print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
17
- print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
18
-
19
- # === Environment
20
  current_dir = os.path.abspath(os.path.dirname(__file__))
21
  os.environ["MKL_THREADING_LAYER"] = "GNU"
22
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
23
 
24
- # === Configs
25
  model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
26
  rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
27
  new_tool_files = {
@@ -29,23 +21,25 @@ new_tool_files = {
29
  }
30
 
31
  question_examples = [
32
- ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
33
  ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
34
  ]
35
 
36
- # === Format output in collapsible panels
37
- def format_collapsible_response(content):
38
- return (
39
- f"<details open style='border: 1px solid #ccc; padding: 8px; margin-top: 8px; border-radius: 6px;'>"
40
- f"<summary style='font-weight: bold; font-size: 16px;'>Answer</summary>"
41
- f"<div style='margin-top: 10px; line-height: 1.6;'>{content.strip()}</div></details>"
42
- )
 
 
 
43
 
44
- # === UI
45
  def create_ui(agent):
46
  with gr.Blocks() as demo:
47
  gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
48
- gr.Markdown("Ask biomedical or therapeutic questions. Powered by step-by-step reasoning and tools.")
49
 
50
  temperature = gr.Slider(0, 1, value=0.3, label="Temperature")
51
  max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
@@ -54,56 +48,65 @@ def create_ui(agent):
54
  multi_agent = gr.Checkbox(label="Enable Multi-agent Reasoning", value=False)
55
  conversation_state = gr.State([])
56
 
57
- chatbot = gr.Chatbot(label="TxAgent", height=600, type="messages")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
59
  send_button = gr.Button("Send", variant="primary")
60
 
61
- # === Chat handler
62
  def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
63
- try:
64
- generator = agent.run_gradio_chat(
65
- message=message,
66
- history=history,
67
- temperature=temperature,
68
- max_new_tokens=max_new_tokens,
69
- max_token=max_tokens,
70
- call_agent=multi_agent,
71
- conversation=conversation,
72
- max_round=max_round
73
- )
74
-
75
- for update in generator:
76
- formatted_messages = []
77
- for m in update:
78
- role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
79
- content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")
80
- if role == "assistant":
81
- content = format_collapsible_response(content)
82
- formatted_messages.append({"role": role, "content": content})
83
- yield formatted_messages
84
- except Exception as e:
85
- print("⚠️ Error in chat handler:", e)
86
- yield history + [{"role": "assistant", "content": f"An error occurred: {str(e)}"}]
87
-
88
- # === Actions
89
  send_button.click(
90
  fn=handle_chat,
91
- inputs=[message_input, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
92
- outputs=chatbot
93
  )
94
 
95
  message_input.submit(
96
  fn=handle_chat,
97
- inputs=[message_input, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
98
- outputs=chatbot
99
  )
100
 
101
  gr.Examples(examples=question_examples, inputs=message_input)
102
- gr.Markdown("**DISCLAIMER**: This demo is for research purposes only and does not provide medical advice.")
103
 
104
  return demo
105
 
106
- # === Entry point
107
  if __name__ == "__main__":
108
  freeze_support()
109
  try:
@@ -115,16 +118,16 @@ if __name__ == "__main__":
115
  enable_checker=True,
116
  step_rag_num=10,
117
  seed=100,
118
- additional_default_tools=[] # Avoid broken tools
119
  )
120
  agent.init_model()
121
 
122
  if not hasattr(agent, "run_gradio_chat"):
123
- raise AttributeError("❌ TxAgent is missing `run_gradio_chat`.")
124
 
125
  demo = create_ui(agent)
126
  demo.launch(show_error=True)
127
 
128
  except Exception as e:
129
- print(f"❌ Failed to launch app: {e}")
130
  raise
 
5
  import importlib
6
  import inspect
7
 
 
8
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
 
 
9
  import txagent.txagent
10
  importlib.reload(txagent.txagent)
11
  from txagent.txagent import TxAgent
12
 
 
 
 
 
13
  current_dir = os.path.abspath(os.path.dirname(__file__))
14
  os.environ["MKL_THREADING_LAYER"] = "GNU"
15
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
16
 
 
17
  model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
18
  rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
19
  new_tool_files = {
 
21
  }
22
 
23
  question_examples = [
24
+ ["Given a patient with WHIM syndrome on antibiotics, is Xolremdi + fluconazole advisable?"],
25
  ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
26
  ]
27
 
28
def extract_sections(content):
    """Split an agent answer into the four UI sections.

    Recognizes markdown-style headings ("## Drug Interactions",
    case-insensitive, 1-6 '#' characters, optional trailing colon) for the
    known section names. Text before the first recognized heading — or the
    entire answer when no headings are present — becomes the Summary. All
    four keys are always returned so the tab output bindings never receive
    a missing value.

    Args:
        content: Raw assistant text accumulated from the agent stream.

    Returns:
        dict with keys "Summary", "Clinical Studies", "Drug Interactions",
        "Pharmacokinetics"; sections absent from the text map to "".
    """
    import re  # local import keeps this function a drop-in for the module

    names = ("Summary", "Clinical Studies", "Drug Interactions", "Pharmacokinetics")
    sections = {name: "" for name in names}

    heading = re.compile(
        r"^\s{0,3}#{1,6}\s*(" + "|".join(re.escape(n) for n in names) + r")\s*:?\s*$",
        re.IGNORECASE | re.MULTILINE,
    )
    hits = list(heading.finditer(content))

    if not hits:
        # No recognizable structure: show everything in the Summary tab.
        sections["Summary"] = content.strip()
        return sections

    # Text before the first heading belongs to the summary.
    preamble = content[: hits[0].start()].strip()
    if preamble:
        sections["Summary"] = preamble

    canonical = {n.lower(): n for n in names}
    for i, hit in enumerate(hits):
        name = canonical[hit.group(1).lower()]
        end = hits[i + 1].start() if i + 1 < len(hits) else len(content)
        body = content[hit.end():end].strip()
        if body:
            # Repeated headings for the same section are concatenated.
            sections[name] = body if not sections[name] else sections[name] + "\n\n" + body
    return sections
38
 
 
39
  def create_ui(agent):
40
  with gr.Blocks() as demo:
41
  gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
42
+ gr.Markdown("Ask therapeutic or biomedical questions. Results are categorized for readability.")
43
 
44
  temperature = gr.Slider(0, 1, value=0.3, label="Temperature")
45
  max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
 
48
  multi_agent = gr.Checkbox(label="Enable Multi-agent Reasoning", value=False)
49
  conversation_state = gr.State([])
50
 
51
# Gradio places a component wherever it is *instantiated*. Creating the
# Markdown boxes before gr.Tabs() and merely aliasing them inside the
# TabItem contexts leaves every tab empty. Instantiate each box inside
# its own tab instead.
with gr.Tabs():
    with gr.TabItem("Summary"):
        summary_box = gr.Markdown(label="Summary")
    with gr.TabItem("Clinical Studies"):
        studies_box = gr.Markdown(label="Clinical Studies")
    with gr.TabItem("Drug Interactions"):
        interactions_box = gr.Markdown(label="Drug Interactions")
    with gr.TabItem("Pharmacokinetics"):
        kinetics_box = gr.Markdown(label="Pharmacokinetics")
66
+
67
  message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
68
  send_button = gr.Button("Send", variant="primary")
69
 
 
70
def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
    """Run one agent turn and split the final answer into the four tab panels.

    Drains the agent's streaming generator, concatenates every assistant
    message, and returns the four section strings in the order the UI
    outputs expect: Summary, Clinical Studies, Drug Interactions,
    Pharmacokinetics. Errors are surfaced in the Summary panel instead of
    crashing the Gradio event handler (the try/except was present in the
    previous revision and is restored here).
    """
    try:
        generator = agent.run_gradio_chat(
            message=message,
            history=history,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            max_token=max_tokens,
            call_agent=multi_agent,
            conversation=conversation,
            max_round=max_round,
        )

        final_output = ""
        for update in generator:
            for m in update:
                role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
                content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")
                # Guard against None content from partially streamed messages.
                if role == "assistant" and content:
                    final_output += content + "\n"

        sections = extract_sections(final_output)
        return (
            sections["Summary"],
            sections["Clinical Studies"],
            sections["Drug Interactions"],
            sections["Pharmacokinetics"],
        )
    except Exception as e:
        print("⚠️ Error in chat handler:", e)
        return f"An error occurred: {str(e)}", "", "", ""
92
+
 
 
 
 
93
# Gradio `inputs` entries must be components; a raw Python list literal
# (`[]`) is invalid and breaks the event binding. Track chat history in
# a State component instead.
history_state = gr.State([])

send_button.click(
    fn=handle_chat,
    inputs=[message_input, history_state, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
    outputs=[summary_box, studies_box, interactions_box, kinetics_box]
)

message_input.submit(
    fn=handle_chat,
    inputs=[message_input, history_state, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
    outputs=[summary_box, studies_box, interactions_box, kinetics_box]
)
104
 
105
  gr.Examples(examples=question_examples, inputs=message_input)
106
+ gr.Markdown("**DISCLAIMER**: For research only. Not medical advice.")
107
 
108
  return demo
109
 
 
110
  if __name__ == "__main__":
111
  freeze_support()
112
  try:
 
118
  enable_checker=True,
119
  step_rag_num=10,
120
  seed=100,
121
+ additional_default_tools=[]
122
  )
123
  agent.init_model()
124
 
125
  if not hasattr(agent, "run_gradio_chat"):
126
+ raise AttributeError("❌ TxAgent missing `run_gradio_chat`")
127
 
128
  demo = create_ui(agent)
129
  demo.launch(show_error=True)
130
 
131
  except Exception as e:
132
+ print(f"❌ App failed to start: {e}")
133
  raise