Ali2206 committed on
Commit befca65 · verified · 1 Parent(s): 9a8092d

Update app.py

Files changed (1)
  1. app.py +60 -193
app.py CHANGED
@@ -10,7 +10,6 @@ import re
 from datetime import datetime
 import time
 
-# Configuration and setup
 persistent_dir = "/data/hf_cache"
 os.makedirs(persistent_dir, exist_ok=True)
 
@@ -40,33 +39,26 @@ def clean_response(text: str) -> str:
         text = text.encode('utf-8', 'surrogatepass').decode('utf-8')
     except UnicodeError:
         text = text.encode('utf-8', 'replace').decode('utf-8')
-
     text = re.sub(r"\[.*?\]|\bNone\b", "", text, flags=re.DOTALL)
     text = re.sub(r"\n{3,}", "\n\n", text)
     text = re.sub(r"[^\n#\-\*\w\s\.,:\(\)]+", "", text)
     return text.strip()
 
-def parse_excel_to_prompts(file_path: str) -> List[str]:
-    try:
-        xl = pd.ExcelFile(file_path)
-        df = xl.parse(xl.sheet_names[0], header=0).fillna("")
-        groups = df.groupby("Booking Number")
-        prompts = []
-
-        for booking, group in groups:
-            records = []
-            for _, row in group.iterrows():
-                record = f"- {row['Form Name']}: {row['Form Item']} = {row['Item Response']} ({row['Interview Date']} by {row['Interviewer']})\n{row['Description']}"
-                records.append(clean_response(record))
-
-            record_text = "\n".join(records)
-            prompt = f"""
-Patient Booking Number: {booking}
+def parse_excel_as_whole_prompt(file_path: str) -> str:
+    xl = pd.ExcelFile(file_path)
+    df = xl.parse(xl.sheet_names[0], header=0).fillna("")
+    records = []
+    for _, row in df.iterrows():
+        record = f"- {row['Form Name']}: {row['Form Item']} = {row['Item Response']} ({row['Interview Date']} by {row['Interviewer']})\n{row['Description']}"
+        records.append(clean_response(record))
+    record_text = "\n".join(records)
+    prompt = f"""
+Patient Complete History:
 
 Instructions:
-Analyze the following patient case for missed diagnoses, medication conflicts, incomplete assessments, and any urgent follow-up needed. Summarize under the markdown headings.
+Based on the complete patient record below, identify any potential missed diagnoses, medication conflicts, incomplete assessments, and urgent follow-up needs. Provide a clinical summary under the markdown headings.
 
-Data:
+Patient History:
 {record_text}
 
 ### Missed Diagnoses
@@ -81,18 +73,13 @@ Data:
 ### Urgent Follow-up
 - ...
 """
-            prompts.append(prompt)
-        return prompts
-    except Exception as e:
-        raise ValueError(f"Error parsing Excel file: {str(e)}")
+    return prompt
 
 def init_agent():
     default_tool_path = os.path.abspath("data/new_tool.json")
     target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
-
     if not os.path.exists(target_tool_path):
         shutil.copy(default_tool_path, target_tool_path)
-
     agent = TxAgent(
         model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
         rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
@@ -107,180 +94,60 @@ def init_agent():
     return agent
 
 def create_ui(agent):
-    with gr.Blocks(theme=gr.themes.Soft(), title="Clinical Oversight Assistant") as demo:
-        gr.Markdown("# 🏥 Clinical Oversight Assistant (Excel Optimized)")
-
-        with gr.Tabs():
-            with gr.TabItem("Analysis"):
-                with gr.Row():
-                    # Left column - Inputs
-                    with gr.Column(scale=1):
-                        file_upload = gr.File(
-                            label="Upload Excel File",
-                            file_types=[".xlsx"],
-                            file_count="single",
-                            interactive=True
-                        )
-                        msg_input = gr.Textbox(
-                            label="Additional Instructions",
-                            placeholder="Add any specific analysis requests...",
-                            lines=3
-                        )
-                        with gr.Row():
-                            clear_btn = gr.Button("Clear", variant="secondary")
-                            send_btn = gr.Button("Analyze", variant="primary")
-
-                    # Right column - Outputs
-                    with gr.Column(scale=2):
-                        chatbot = gr.Chatbot(
-                            label="Analysis Results",
-                            height=600,
-                            bubble_full_width=False,
-                            show_copy_button=True
-                        )
-                        download_output = gr.File(
-                            label="Download Full Report",
-                            interactive=False
-                        )
-
-            with gr.TabItem("Instructions"):
-                gr.Markdown("""
-                ## How to Use This Tool
-
-                1. **Upload Excel File**: Select your patient records Excel file
-                2. **Add Instructions** (Optional): Provide any specific analysis requests
-                3. **Click Analyze**: The system will process each patient record
-                4. **Review Results**: Analysis appears in the chat window
-                5. **Download Report**: Get a full text report of all findings
-
-                ### Excel File Requirements
-                Your Excel file must contain these columns:
-                - Booking Number
-                - Form Name
-                - Form Item
-                - Item Response
-                - Interview Date
-                - Interviewer
-                - Description
-
-                ### Analysis Includes
-                - Missed diagnoses
-                - Medication conflicts
-                - Incomplete assessments
-                - Urgent follow-up needs
-                """)
-
-        def format_message(role: str, content: str) -> Tuple[str, str]:
-            """Format messages for the chatbot in (user, bot) format"""
-            if role == "user":
-                return (content, None)
-            else:
-                return (None, content)
-
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown("<h1 style='text-align: center;'>🏥 Full Medical History Analyzer</h1>")
+        chatbot = gr.Chatbot(label="Summary Output", height=600)
+        file_upload = gr.File(label="Upload Excel File", file_types=[".xlsx"], file_count="single")
+        msg_input = gr.Textbox(label="Optional Message", placeholder="Add context or instructions...", lines=2)
+        send_btn = gr.Button("Analyze")
+        download_output = gr.File(label="Download Report")
+
         def analyze(message: str, chat_history: List[Tuple[str, str]], file) -> Tuple[List[Tuple[str, str]], str]:
             if not file:
-                raise gr.Error("Please upload an Excel file first")
-
+                raise gr.Error("Please upload an Excel file.")
+            new_history = chat_history + [(message, None)]
+            new_history.append((None, "⏳ Analyzing full patient history..."))
+            yield new_history, None
+
             try:
-                # Initialize chat history with user message
-                new_history = chat_history + [format_message("user", message)]
-                new_history.append(format_message("assistant", "⏳ Processing Excel data..."))
-                yield new_history, None
-
-                prompts = parse_excel_to_prompts(file.name)
+                prompt = parse_excel_as_whole_prompt(file.name)
                 full_output = ""
-
-                for idx, prompt in enumerate(prompts, 1):
-                    chunk_output = ""
-                    try:
-                        for result in agent.run_gradio_chat(
-                            message=prompt,
-                            history=[],
-                            temperature=0.2,
-                            max_new_tokens=1024,
-                            max_token=4096,
-                            call_agent=False,
-                            conversation=[],
-                        ):
-                            if isinstance(result, list):
-                                for r in result:
-                                    if hasattr(r, 'content') and r.content:
-                                        cleaned = clean_response(r.content)
-                                        chunk_output += cleaned + "\n"
-                            elif isinstance(result, str):
-                                cleaned = clean_response(result)
-                                chunk_output += cleaned + "\n"
-
-                        if chunk_output:
-                            output = f"--- Booking {idx} ---\n{chunk_output.strip()}\n"
-                            new_history[-1] = format_message("assistant", output)
-                            yield new_history, None
-
-                    except Exception as e:
-                        error_msg = f"⚠️ Error processing booking {idx}: {str(e)}"
-                        new_history.append(format_message("assistant", error_msg))
-                        yield new_history, None
-                        continue
-
-                    if chunk_output:
-                        output = f"--- Booking {idx} ---\n{chunk_output.strip()}\n"
-                        new_history.append(format_message("assistant", output))
-                        full_output += output + "\n"
-                        yield new_history, None
-
-                # Save report
-                file_hash_value = file_hash(file.name)
-                report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt")
+                for result in agent.run_gradio_chat(
+                    message=prompt,
+                    history=[],
+                    temperature=0.2,
+                    max_new_tokens=2048,
+                    max_token=4096,
+                    call_agent=False,
+                    conversation=[],
+                ):
+                    if isinstance(result, list):
+                        for r in result:
+                            if hasattr(r, 'content') and r.content:
+                                full_output += clean_response(r.content) + "\n"
+                    elif isinstance(result, str):
+                        full_output += clean_response(result) + "\n"
+
+                new_history[-1] = (None, full_output.strip())
+                report_path = os.path.join(report_dir, f"{file_hash(file.name)}_final_report.txt")
                 with open(report_path, "w", encoding="utf-8") as f:
-                    f.write(full_output)
-
-                yield new_history, report_path if os.path.exists(report_path) else None
-
+                    f.write(full_output.strip())
+                yield new_history, report_path
             except Exception as e:
-                new_history.append(format_message("assistant", f"❌ Error: {str(e)}"))
+                new_history.append((None, f"❌ Error during analysis: {str(e)}"))
                 yield new_history, None
-                raise gr.Error(f"Analysis failed: {str(e)}")
-
-        def clear_chat():
-            return [], None
-
-        # Event handlers
-        send_btn.click(
-            analyze,
-            inputs=[msg_input, chatbot, file_upload],
-            outputs=[chatbot, download_output],
-            api_name="analyze"
-        )
-
-        msg_input.submit(
-            analyze,
-            inputs=[msg_input, chatbot, file_upload],
-            outputs=[chatbot, download_output]
-        )
-
-        clear_btn.click(
-            clear_chat,
-            inputs=[],
-            outputs=[chatbot, download_output]
-        )
-
+
+        send_btn.click(analyze, inputs=[msg_input, chatbot, file_upload], outputs=[chatbot, download_output])
+        msg_input.submit(analyze, inputs=[msg_input, chatbot, file_upload], outputs=[chatbot, download_output])
     return demo
 
 if __name__ == "__main__":
-    try:
-        agent = init_agent()
-        demo = create_ui(agent)
-
-        demo.queue(
-            api_open=False,
-            max_size=20
-        ).launch(
-            server_name="0.0.0.0",
-            server_port=7860,
-            show_error=True,
-            allowed_paths=[report_dir],
-            share=False
-        )
-    except Exception as e:
-        print(f"Failed to launch application: {str(e)}")
-        sys.exit(1)
+    agent = init_agent()
+    demo = create_ui(agent)
+    demo.queue(api_open=False).launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        show_error=True,
+        allowed_paths=[report_dir],
+        share=False
+    )
 
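For reference, the reworked parse_excel_as_whole_prompt() reads every row of the first worksheet and flattens it into a single consolidated prompt, rather than building one prompt per Booking Number. The sketch below is illustrative only and not part of the commit: it creates a one-row workbook with the column names the function accesses and prints a preview of the resulting prompt. It assumes pandas plus an xlsx writer such as openpyxl are installed and that app.py is importable in the same environment; the file name and sample values are hypothetical.

# Illustrative only; not part of the commit. Builds a minimal workbook with the
# columns parse_excel_as_whole_prompt() reads, then previews the prompt it returns.
# Assumption: pandas + openpyxl are installed and app.py (and its dependencies)
# can be imported in this environment.
import pandas as pd
from app import parse_excel_as_whole_prompt  # assumption: app.py is on the path

sample = pd.DataFrame([{
    "Booking Number": "BK-0001",             # still a valid column, but no longer grouped on
    "Form Name": "Intake Assessment",
    "Form Item": "Current Medications",
    "Item Response": "Lisinopril 10mg daily",
    "Interview Date": "2024-01-15",
    "Interviewer": "Dr. Example",
    "Description": "Patient reports good adherence.",
}])
sample.to_excel("sample_history.xlsx", index=False)

prompt = parse_excel_as_whole_prompt("sample_history.xlsx")
print(prompt[:400])  # one prompt containing every row, ready for agent.run_gradio_chat()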