Ali2206 committed on
Commit
6032958
·
verified ·
1 Parent(s): b321961

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -74
app.py CHANGED
@@ -131,8 +131,9 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
131
  return messages, report_path
132
 
133
  try:
134
- messages.append({"role": "user", "content": f"Processing Excel file: {os.path.basename(file.name)}"})
135
- messages.append({"role": "assistant", "content": "⏳ Extracting and analyzing data..."})
 
136
  extracted_text = extract_text_from_excel(file.name)
137
  chunks = split_text_into_chunks(extracted_text)
138
  chunk_responses = [None] * len(chunks)
@@ -165,57 +166,32 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
165
  return index, f"❌ Error analyzing chunk {index+1}: {str(e)}"
166
  return index, clean_response(response)
167
 
 
168
  with ThreadPoolExecutor(max_workers=1) as executor:
169
  futures = [executor.submit(analyze_chunk, i, chunk) for i, chunk in enumerate(chunks)]
170
  for future in as_completed(futures):
171
  i, result = future.result()
172
  chunk_responses[i] = result
173
- if not result.startswith("❌"):
174
- messages.append({"role": "assistant", "content": f"✅ Chunk {i+1} analysis complete"})
175
- else:
176
- messages.append({"role": "assistant", "content": result})
177
 
178
  valid_responses = [res for res in chunk_responses if not res.startswith("❌")]
179
  if not valid_responses:
180
- messages.append({"role": "assistant", "content": "❌ No valid chunk responses to summarize."})
181
  return messages, report_path
182
 
183
- summary = ""
184
- current_summary_tokens = 0
185
- for i, response in enumerate(valid_responses):
186
- response_tokens = estimate_tokens(response)
187
- if current_summary_tokens + response_tokens > MAX_MODEL_TOKENS - PROMPT_OVERHEAD - MAX_NEW_TOKENS:
188
- summary_prompt = f"Summarize the following analysis:\n\n{summary}\n\nProvide a concise summary."
189
- summary_response = ""
190
- try:
191
- for result in agent.run_gradio_chat(
192
- message=summary_prompt,
193
- history=[],
194
- temperature=0.2,
195
- max_new_tokens=MAX_NEW_TOKENS,
196
- max_token=MAX_MODEL_TOKENS,
197
- call_agent=False,
198
- conversation=[],
199
- ):
200
- if isinstance(result, str):
201
- summary_response += result
202
- elif hasattr(result, "content"):
203
- summary_response += result.content
204
- elif isinstance(result, list):
205
- for r in result:
206
- if hasattr(r, "content"):
207
- summary_response += r.content
208
- summary = clean_response(summary_response)
209
- current_summary_tokens = estimate_tokens(summary)
210
- except Exception as e:
211
- messages.append({"role": "assistant", "content": f"❌ Error summarizing intermediate results: {str(e)}"})
212
- return messages, report_path
213
- summary += f"\n\n### Chunk {i+1} Analysis\n{response}"
214
- current_summary_tokens += response_tokens
215
-
216
- final_prompt = f"Summarize the key findings from the following analyses:\n\n{summary}"
217
- messages.append({"role": "assistant", "content": "📊 Generating final report..."})
218
 
 
 
219
  final_report_text = ""
220
  try:
221
  for result in agent.run_gradio_chat(
@@ -239,16 +215,18 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
239
  messages.append({"role": "assistant", "content": f"❌ Error generating final report: {str(e)}"})
240
  return messages, report_path
241
 
242
- final_report = f"# 🧠 Final Patient Report\n\n{clean_response(final_report_text)}"
243
- messages[-1]["content"] = f"📊 Final Report:\n\n{clean_response(final_report_text)}"
 
 
244
 
245
  timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
246
- report_path = os.path.join(report_dir, f"report_{timestamp}.md")
247
 
248
  with open(report_path, 'w') as f:
249
  f.write(final_report)
250
 
251
- messages.append({"role": "assistant", "content": f"✅ Report generated and saved: report_{timestamp}.md"})
252
 
253
  except Exception as e:
254
  messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
@@ -257,63 +235,129 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
257
 
258
  def create_ui(agent):
259
  with gr.Blocks(
260
- title="Patient History Chat",
261
  css="""
262
  .gradio-container {
263
  max-width: 900px !important;
264
  margin: auto;
265
- font-family: 'Segoe UI', sans-serif;
266
- background-color: #f8f9fa;
267
  }
268
  .gr-button.primary {
269
- background: linear-gradient(to right, #4b6cb7, #182848);
270
  color: white;
271
  border: none;
272
  border-radius: 8px;
 
 
 
273
  }
274
  .gr-button.primary:hover {
275
- background: linear-gradient(to right, #3552a3, #101a3e);
 
 
276
  }
277
  .gr-file-upload, .gr-chatbot, .gr-markdown {
278
  background-color: white;
279
- border-radius: 10px;
280
- box-shadow: 0 2px 4px rgba(0,0,0,0.1);
281
- padding: 1rem;
 
282
  }
283
  .gr-chatbot {
284
- border-left: 4px solid #4b6cb7;
 
285
  }
286
- .gr-file-upload input {
287
- font-size: 0.95rem;
 
 
 
 
 
 
 
 
 
 
288
  }
289
- .chat-message-content p {
 
 
 
 
290
  margin: 0.3em 0;
291
  }
292
- .chat-message-content ul {
293
- padding-left: 1.2em;
294
- margin: 0.4em 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
295
  }
296
  """
297
  ) as demo:
298
  gr.Markdown("""
299
- <h2 style='color:#182848'>πŸ₯ Patient History Analysis Tool</h2>
300
- <p style='color:#444;'>Upload an Excel file containing clinical data. The assistant will analyze it for patterns, inconsistencies, and recommendations.</p>
 
 
301
  """)
302
 
303
  with gr.Row():
304
  with gr.Column(scale=3):
305
  chatbot = gr.Chatbot(
306
- label="Clinical Assistant",
307
  show_copy_button=True,
308
  height=600,
309
- type="messages",
310
  avatar_images=(None, "https://i.imgur.com/6wX7Zb4.png"),
311
- render_markdown=True
 
 
312
  )
313
  with gr.Column(scale=1):
314
- file_upload = gr.File(label="Upload Excel File", file_types=[".xlsx"], height=100)
315
- analyze_btn = gr.Button("🧠 Analyze Patient History", variant="primary", elem_classes="primary")
316
- report_output = gr.File(label="Download Report", visible=False, interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
317
 
318
  chatbot_state = gr.State(value=[])
319
 
@@ -324,13 +368,23 @@ def create_ui(agent):
324
  role = msg.get("role")
325
  content = msg.get("content", "")
326
  if role == "assistant":
327
- content = content.replace("- ", "\n- ")
328
- content = f"<div class='chat-message-content'>{content}</div>"
 
 
 
 
329
  formatted_messages.append({"role": role, "content": content})
 
330
  report_update = gr.update(visible=report_path is not None, value=report_path)
331
  return formatted_messages, report_update, formatted_messages
332
 
333
- analyze_btn.click(fn=update_ui, inputs=[file_upload, chatbot_state], outputs=[chatbot, report_output, chatbot_state], api_name="analyze")
 
 
 
 
 
334
 
335
  return demo
336
 
@@ -338,7 +392,13 @@ if __name__ == "__main__":
338
  try:
339
  agent = init_agent()
340
  demo = create_ui(agent)
341
- demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True, allowed_paths=["/data/hf_cache/reports"], share=False)
 
 
 
 
 
 
342
  except Exception as e:
343
  print(f"Error: {str(e)}")
344
- sys.exit(1)
 
131
  return messages, report_path
132
 
133
  try:
134
+ messages.append({"role": "user", "content": f"📄 Processing Excel file: {os.path.basename(file.name)}"})
135
+ messages.append({"role": "assistant", "content": "🔍 Analyzing clinical data... This may take a moment."})
136
+
137
  extracted_text = extract_text_from_excel(file.name)
138
  chunks = split_text_into_chunks(extracted_text)
139
  chunk_responses = [None] * len(chunks)
 
166
  return index, f"❌ Error analyzing chunk {index+1}: {str(e)}"
167
  return index, clean_response(response)
168
 
169
+ # Process chunks silently without displaying progress
170
  with ThreadPoolExecutor(max_workers=1) as executor:
171
  futures = [executor.submit(analyze_chunk, i, chunk) for i, chunk in enumerate(chunks)]
172
  for future in as_completed(futures):
173
  i, result = future.result()
174
  chunk_responses[i] = result
 
 
 
 
175
 
176
  valid_responses = [res for res in chunk_responses if not res.startswith("❌")]
177
  if not valid_responses:
178
+ messages.append({"role": "assistant", "content": "❌ No valid analysis results to summarize."})
179
  return messages, report_path
180
 
181
+ summary = "\n\n".join(valid_responses)
182
+ final_prompt = f"""Please synthesize the following clinical analyses into a concise, well-structured report:
183
+
184
+ {summary}
185
+
186
+ Structure your response with clear sections:
187
+ 1. Key Diagnostic Patterns
188
+ 2. Medication Concerns
189
+ 3. Potential Missed Opportunities
190
+ 4. Notable Inconsistencies
191
+ 5. Recommended Follow-ups
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
 
193
+ Use bullet points for clarity and professional medical terminology."""
194
+
195
  final_report_text = ""
196
  try:
197
  for result in agent.run_gradio_chat(
 
215
  messages.append({"role": "assistant", "content": f"❌ Error generating final report: {str(e)}"})
216
  return messages, report_path
217
 
218
+ final_report = f"# 🧠 Clinical Analysis Report\n\n{clean_response(final_report_text)}"
219
+
220
+ # Update the last message with the final report
221
+ messages[-1]["content"] = f"## 📋 Clinical Analysis Report\n\n{clean_response(final_report_text)}"
222
 
223
  timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
224
+ report_path = os.path.join(report_dir, f"clinical_report_{timestamp}.md")
225
 
226
  with open(report_path, 'w') as f:
227
  f.write(final_report)
228
 
229
+ messages.append({"role": "assistant", "content": f"✅ Report generated successfully. You can download it below."})
230
 
231
  except Exception as e:
232
  messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
 
235
 
236
  def create_ui(agent):
237
  with gr.Blocks(
238
+ title="Clinical Analysis Tool",
239
  css="""
240
  .gradio-container {
241
  max-width: 900px !important;
242
  margin: auto;
243
+ font-family: 'Inter', sans-serif;
244
+ background-color: #f9fafb;
245
  }
246
  .gr-button.primary {
247
+ background: linear-gradient(to right, #4f46e5, #7c3aed);
248
  color: white;
249
  border: none;
250
  border-radius: 8px;
251
+ padding: 12px 24px;
252
+ font-weight: 500;
253
+ transition: all 0.2s;
254
  }
255
  .gr-button.primary:hover {
256
+ background: linear-gradient(to right, #4338ca, #6d28d9);
257
+ transform: translateY(-1px);
258
+ box-shadow: 0 4px 6px rgba(0,0,0,0.1);
259
  }
260
  .gr-file-upload, .gr-chatbot, .gr-markdown {
261
  background-color: white;
262
+ border-radius: 12px;
263
+ box-shadow: 0 1px 3px rgba(0,0,0,0.05);
264
+ padding: 1.5rem;
265
+ border: 1px solid #e5e7eb;
266
  }
267
  .gr-chatbot {
268
+ min-height: 600px;
269
+ border-left: none;
270
  }
271
+ .chat-message-user {
272
+ background-color: #f3f4f6;
273
+ border-radius: 12px;
274
+ padding: 12px 16px;
275
+ margin: 8px 0;
276
+ }
277
+ .chat-message-assistant {
278
+ background-color: white;
279
+ border-radius: 12px;
280
+ padding: 12px 16px;
281
+ margin: 8px 0;
282
+ border: 1px solid #e5e7eb;
283
  }
284
+ .chat-message-content ul, .chat-message-content ol {
285
+ padding-left: 1.5em;
286
+ margin: 0.5em 0;
287
+ }
288
+ .chat-message-content li {
289
  margin: 0.3em 0;
290
  }
291
+ h1, h2, h3 {
292
+ color: #111827;
293
+ }
294
+ .gr-markdown h1 {
295
+ font-size: 1.8rem;
296
+ margin-bottom: 1rem;
297
+ font-weight: 600;
298
+ }
299
+ .gr-markdown p {
300
+ color: #4b5563;
301
+ line-height: 1.6;
302
+ }
303
+ .progress-bar {
304
+ height: 4px;
305
+ background: #e5e7eb;
306
+ border-radius: 2px;
307
+ margin: 12px 0;
308
+ overflow: hidden;
309
+ }
310
+ .progress-bar-fill {
311
+ height: 100%;
312
+ background: linear-gradient(to right, #4f46e5, #7c3aed);
313
+ transition: width 0.3s ease;
314
  }
315
  """
316
  ) as demo:
317
  gr.Markdown("""
318
+ <div style='text-align: center; margin-bottom: 1.5rem;'>
319
+ <h1 style='margin-bottom: 0.5rem; color: #111827;'>Clinical Documentation Analyzer</h1>
320
+ <p style='color: #6b7280; margin-top: 0;'>Upload patient records in Excel format for comprehensive clinical analysis</p>
321
+ </div>
322
  """)
323
 
324
  with gr.Row():
325
  with gr.Column(scale=3):
326
  chatbot = gr.Chatbot(
327
+ label="Analysis Results",
328
  show_copy_button=True,
329
  height=600,
330
+ bubble_full_width=False,
331
  avatar_images=(None, "https://i.imgur.com/6wX7Zb4.png"),
332
+ render_markdown=True,
333
+ likeable=True,
334
+ layout="panel"
335
  )
336
  with gr.Column(scale=1):
337
+ file_upload = gr.File(
338
+ label="Upload Patient Records",
339
+ file_types=[".xlsx", ".xls"],
340
+ height=100,
341
+ interactive=True
342
+ )
343
+ analyze_btn = gr.Button(
344
+ "Analyze Clinical Data",
345
+ variant="primary",
346
+ elem_classes="primary"
347
+ )
348
+ report_output = gr.File(
349
+ label="Download Report",
350
+ visible=False,
351
+ interactive=False
352
+ )
353
+ gr.Markdown("""
354
+ <div style='margin-top: 1rem; padding: 1rem; background-color: #f8fafc; border-radius: 8px;'>
355
+ <h3 style='margin-top: 0; margin-bottom: 0.5rem; font-size: 1rem;'>About this tool</h3>
356
+ <p style='margin: 0; font-size: 0.9rem; color: #64748b;'>
357
+ This tool analyzes clinical documentation to identify patterns, inconsistencies, and opportunities for improved patient care.
358
+ </p>
359
+ </div>
360
+ """)
361
 
362
  chatbot_state = gr.State(value=[])
363
 
 
368
  role = msg.get("role")
369
  content = msg.get("content", "")
370
  if role == "assistant":
371
+ # Format lists and sections for better readability
372
+ content = content.replace("- ", "• ")
373
+ content = re.sub(r"(\d+\.\s)", r"\n\1", content)
374
+ content = f"<div class='chat-message-assistant'>{content}</div>"
375
+ else:
376
+ content = f"<div class='chat-message-user'>{content}</div>"
377
  formatted_messages.append({"role": role, "content": content})
378
+
379
  report_update = gr.update(visible=report_path is not None, value=report_path)
380
  return formatted_messages, report_update, formatted_messages
381
 
382
+ analyze_btn.click(
383
+ fn=update_ui,
384
+ inputs=[file_upload, chatbot_state],
385
+ outputs=[chatbot, report_output, chatbot_state],
386
+ api_name="analyze"
387
+ )
388
 
389
  return demo
390
 
 
392
  try:
393
  agent = init_agent()
394
  demo = create_ui(agent)
395
+ demo.launch(
396
+ server_name="0.0.0.0",
397
+ server_port=7860,
398
+ show_error=True,
399
+ allowed_paths=["/data/hf_cache/reports"],
400
+ share=False
401
+ )
402
  except Exception as e:
403
  print(f"Error: {str(e)}")
404
+ sys.exit(1)