Ali2206 committed on
Commit 1c98688 · verified · 1 Parent(s): 0ec5bb2

Update app.py

Files changed (1)
  1. app.py +110 -6
app.py CHANGED
@@ -1,10 +1,114 @@
 import gradio as gr
 
-def echo(msg, history):
-    return history + [(msg, f"Echo: {msg}")]
 
-with gr.Blocks() as app:
-    chatbot = gr.Chatbot(label="EchoBot", type="messages")
-    msg = gr.Textbox(label="Type a message")
 
-    msg.submit(echo, [msg, chatbot], chatbot)
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+tx_app = None  # global agent
+
+def respond(message, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round):
+    global tx_app
+    # respond is a generator (it yields below), so early exits must yield a
+    # value and then return bare; a value `return` would be swallowed.
+    if tx_app is None:
+        yield chat_history + [{"role": "assistant", "content": "⚠️ Model is still loading. Please wait a few seconds and try again."}]
+        return
+
+    try:
+        if not isinstance(message, str) or len(message.strip()) < 10:
+            yield chat_history + [{"role": "assistant", "content": "Please enter a longer message."}]
+            return
+
+        # The agent call consumes (role, content) tuples, so convert
+        # messages-format history for the agent without mutating chat_history.
+        agent_history = chat_history
+        if chat_history and isinstance(chat_history[0], dict):
+            agent_history = [(h["role"], h["content"]) for h in chat_history if "role" in h and "content" in h]
+
+        response = ""
+        for chunk in tx_app.run_gradio_chat(
+            message=message.strip(),
+            history=agent_history,
+            temperature=temperature,
+            max_new_tokens=max_new_tokens,
+            max_token=max_tokens,
+            call_agent=multi_agent,
+            conversation=conversation_state,
+            max_round=max_round,
+            seed=42,
+        ):
+            if isinstance(chunk, dict):
+                response += chunk.get("content", "")
+            elif isinstance(chunk, str):
+                response += chunk
+            else:
+                response += str(chunk)
+
+            # The chatbot is type="messages", so stream role/content dicts,
+            # not (user, bot) tuples.
+            yield chat_history + [{"role": "user", "content": message},
+                                  {"role": "assistant", "content": response}]
+    except Exception as e:
+        logger.error(f"Respond error: {e}")
+        yield chat_history + [{"role": "assistant", "content": f"⚠️ Error: {e}"}]
+
+# Define the Gradio app at module level so Hugging Face Spaces can find it
+with gr.Blocks(title="TxAgent Biomedical Assistant") as app:
+    gr.Markdown("# 🧠 TxAgent Biomedical Assistant")
+
+    chatbot = gr.Chatbot(label="Conversation", height=600, type="messages")
+    msg = gr.Textbox(label="Your medical query", placeholder="Type here...", lines=3)
+
+    with gr.Row():
+        temp = gr.Slider(0, 1, value=0.3, label="Temperature")
+        max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
+        max_tokens = gr.Slider(128, 81920, value=81920, label="Max Total Tokens")
+        max_rounds = gr.Slider(1, 30, value=10, label="Max Rounds")
+        multi_agent = gr.Checkbox(label="Multi-Agent Mode")
+
+    conversation_state = gr.State([])
+    submit = gr.Button("Submit")
+    clear = gr.Button("Clear")
+
+    submit.click(
+        respond,
+        [msg, chatbot, temp, max_new_tokens, max_tokens, multi_agent, conversation_state, max_rounds],
+        chatbot
+    )
+    clear.click(lambda: [], None, chatbot)
+    msg.submit(
+        respond,
+        [msg, chatbot, temp, max_new_tokens, max_tokens, multi_agent, conversation_state, max_rounds],
+        chatbot
+    )
+
+# 🔥 Safely initialize vLLM inside __main__ (the spawn start method avoids
+# CUDA re-initialization errors in forked worker processes)
+if __name__ == "__main__":
+    import multiprocessing
+    multiprocessing.set_start_method("spawn", force=True)
+
+    import torch
+    from txagent import TxAgent
+    from importlib.resources import files
+
+    logger.info("🔥 Initializing TxAgent safely in __main__")
+
+    tool_files = {
+        "opentarget": str(files('tooluniverse.data').joinpath('opentarget_tools.json')),
+        "fda_drug_label": str(files('tooluniverse.data').joinpath('fda_drug_labeling_tools.json')),
+        "special_tools": str(files('tooluniverse.data').joinpath('special_tools.json')),
+        "monarch": str(files('tooluniverse.data').joinpath('monarch_tools.json'))
+    }
+
+    tx_app = TxAgent(
+        model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
+        rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
+        tool_files_dict=tool_files,
+        enable_finish=True,
+        enable_rag=True,
+        enable_summary=False,
+        init_rag_num=0,
+        step_rag_num=10,
+        summary_mode='step',
+        summary_skip_last_k=0,
+        summary_context_length=None,
+        force_finish=True,
+        avoid_repeat=True,
+        seed=42,
+        enable_checker=True,
+        enable_chat=False,
+        additional_default_tools=["DirectResponse", "RequireClarification"]
+    )
+
+    tx_app.init_model()
+    logger.info("✅ TxAgent initialized.")
+
+    # Launch the UI when the script is run directly; without this, a direct
+    # `python app.py` would initialize the model and then exit.
+    app.launch()
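
Review note: the streaming handler above can be smoke-tested locally without downloading the 8B model by swapping a stub in for TxAgent. The sketch below is hypothetical reviewer tooling, not part of the commit; StubAgent and its chunks are invented stand-ins that only exercise the three chunk branches of the respond generator.

# smoke_test.py — hypothetical; run next to app.py
import app  # importing builds the Blocks UI but does not launch a server

class StubAgent:
    # Mimics the three chunk types respond handles: dict, str, and anything else.
    def run_gradio_chat(self, message, history, **kwargs):
        yield {"content": "Checking interactions"}
        yield " for warfarin..."
        yield 42  # non-str chunk; exercises the str() fallback

app.tx_app = StubAgent()  # replaces the global agent before any request
for update in app.respond("What interacts with warfarin therapy?", [],
                          0.3, 1024, 81920, False, [], 10):
    print(update[-1]["content"])  # prints the growing assistant reply

Each iteration prints the accumulated assistant message in messages format, which is what the type="messages" Chatbot expects to receive.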