Update app.py
app.py
CHANGED
@@ -4,7 +4,7 @@ import pandas as pd
 import pdfplumber
 import json
 import gradio as gr
-from typing import List, Dict, Optional, Generator
+from typing import List, Dict, Optional, Generator, Any
 from concurrent.futures import ThreadPoolExecutor, as_completed
 import hashlib
 import shutil
@@ -253,6 +253,10 @@ def summarize_findings(combined_response: str) -> str:

     return summary.strip()

+def update_progress(current, total, stage=""):
+    progress = f"{stage} - {current}/{total}" if stage else f"{current}/{total}"
+    return {"value": progress, "visible": True, "label": f"Progress: {progress}"}
+
 def init_agent():
     logger.info("Initializing model...")
     log_system_usage("Before Load")
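For reference, the new `update_progress` helper only packages the progress string into the value/label dict used for the progress textbox; a quick check of its output (argument values here are hypothetical):

>>> update_progress(2, 5, "Analyzing")
{'value': 'Analyzing - 2/5', 'visible': True, 'label': 'Progress: Analyzing - 2/5'}
>>> update_progress(3, 4)
{'value': '3/4', 'visible': True, 'label': 'Progress: 3/4'}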
@@ -276,6 +280,145 @@ def init_agent():
     logger.info("Agent Ready")
     return agent

+def process_response_stream(prompt: str, history: List[dict]) -> Generator[dict, None, None]:
+    """Process a single prompt and stream the response"""
+    full_response = ""
+    for chunk_output in agent.run_gradio_chat(prompt, [], 0.2, 512, 2048, False, []):
+        if chunk_output is None:
+            continue
+
+        if isinstance(chunk_output, list):
+            for m in chunk_output:
+                if hasattr(m, 'content') and m.content:
+                    cleaned = clean_response(m.content)
+                    if cleaned:
+                        full_response += cleaned + " "
+                        yield {"role": "assistant", "content": full_response}
+        elif isinstance(chunk_output, str) and chunk_output.strip():
+            cleaned = clean_response(chunk_output)
+            if cleaned:
+                full_response += cleaned + " "
+                yield {"role": "assistant", "content": full_response}
+
+    return full_response
+
+def analyze(message: str, history: List[dict], files: List) -> Generator[Dict[str, Any], None, None]:
+    # Initialize outputs
+    outputs = {
+        "chatbot": history.copy(),
+        "download_output": None,
+        "final_summary": "",
+        "progress_text": {"value": "Starting analysis...", "visible": True}
+    }
+
+    try:
+        # Start with user message
+        history.append({"role": "user", "content": message})
+        outputs["chatbot"] = history
+        yield outputs
+
+        extracted = []
+        file_hash_value = ""
+
+        if files:
+            # Process files in parallel
+            with ThreadPoolExecutor(max_workers=4) as executor:
+                futures = []
+                for f in files:
+                    file_type = f.name.split(".")[-1].lower()
+                    futures.append(executor.submit(process_file, f.name, file_type))
+
+                for i, future in enumerate(as_completed(futures), 1):
+                    try:
+                        extracted.extend(future.result())
+                        outputs["progress_text"] = update_progress(i, len(files), "Processing files")
+                        yield outputs
+                    except Exception as e:
+                        logger.error(f"File processing error: {e}")
+                        extracted.append({"error": f"Error processing file: {str(e)}"})
+
+            file_hash_value = file_hash(files[0].name) if files else ""
+            history.append({"role": "assistant", "content": "✅ File processing complete"})
+            outputs.update({
+                "chatbot": history,
+                "progress_text": update_progress(len(files), len(files), "Files processed")
+            })
+            yield outputs
+
+        # Convert extracted data to JSON text
+        text_content = "\n".join(json.dumps(item) for item in extracted)
+
+        # Tokenize and chunk the content properly
+        chunks = tokenize_and_chunk(text_content)
+        combined_response = ""
+
+        for chunk_idx, chunk in enumerate(chunks, 1):
+            prompt = f"""
+Analyze the patient record excerpt for missed diagnoses only. Provide a concise, evidence-based summary as a single paragraph without headings or bullet points. Include specific clinical findings (e.g., 'elevated blood pressure (160/95) on page 10'), their potential implications (e.g., 'may indicate untreated hypertension'), and a recommendation for urgent review. Do not include other oversight categories like medication conflicts. If no missed diagnoses are found, state 'No missed diagnoses identified' in a single sentence.
+
+Patient Record Excerpt (Chunk {chunk_idx} of {len(chunks)}):
+{chunk[:1800]}
+"""
+
+            # Create a placeholder message
+            history.append({"role": "assistant", "content": ""})
+            outputs.update({
+                "chatbot": history,
+                "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
+            })
+            yield outputs
+
+            # Process and stream the response
+            chunk_response = ""
+            for update in process_response_stream(prompt, history):
+                # Update the last message with streaming content
+                history[-1] = update
+                chunk_response = update["content"]
+                outputs.update({
+                    "chatbot": history,
+                    "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
+                })
+                yield outputs
+
+            combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
+
+            # Clean up memory
+            torch.cuda.empty_cache()
+            gc.collect()
+
+        # Generate final summary
+        summary = summarize_findings(combined_response)
+        report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
+        if report_path:
+            with open(report_path, "w", encoding="utf-8") as f:
+                f.write(combined_response + "\n\n" + summary)
+
+        outputs.update({
+            "download_output": report_path if report_path and os.path.exists(report_path) else None,
+            "final_summary": summary,
+            "progress_text": {"visible": False}
+        })
+        yield outputs
+
+    except Exception as e:
+        logger.error("Analysis error: %s", e)
+        history.append({"role": "assistant", "content": f"❌ Error occurred: {str(e)}"})
+        outputs.update({
+            "chatbot": history,
+            "final_summary": f"Error occurred during analysis: {str(e)}",
+            "progress_text": {"visible": False}
+        })
+        yield outputs
+
+def clear_and_start():
+    return [
+        [], # chatbot
+        None, # download_output
+        "", # final_summary
+        "", # msg_input
+        None, # file_upload
+        {"visible": False} # progress_text
+    ]
+
 def create_ui(agent):
     with gr.Blocks(theme=gr.themes.Soft(), title="Clinical Oversight Assistant") as demo:
         gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
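One subtlety in the added `process_response_stream`: it both yields incremental updates and `return`s the final text. A generator's `return` value is only exposed through `StopIteration.value` (or via `yield from`), which is why `analyze` tracks the last yielded `content` instead of relying on that return. A minimal illustration of the behavior:

def stream():
    yield "partial"
    return "final"

gen = stream()
print(next(gen))           # "partial"
try:
    next(gen)
except StopIteration as stop:
    print(stop.value)      # "final"; a plain for-loop would discard this value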
@@ -289,7 +432,7 @@ def create_ui(agent):
                 avatar_images=(
                     "assets/user.png",
                     "assets/assistant.png"
-                ),
+                ) if os.path.exists("assets/user.png") else None,
                 render=False
             )
             with gr.Column(scale=1):
@@ -330,151 +473,6 @@ def create_ui(agent):
                     interactive=False
                 )

-        def update_progress(current, total, stage=""):
-            progress = f"{stage} - {current}/{total}" if stage else f"{current}/{total}"
-            return {"value": progress, "visible": True, "label": f"Progress: {progress}"}
-
-        prompt_template = """
-Analyze the patient record excerpt for missed diagnoses only. Provide a concise, evidence-based summary as a single paragraph without headings or bullet points. Include specific clinical findings (e.g., 'elevated blood pressure (160/95) on page 10'), their potential implications (e.g., 'may indicate untreated hypertension'), and a recommendation for urgent review. Do not include other oversight categories like medication conflicts. If no missed diagnoses are found, state 'No missed diagnoses identified' in a single sentence.
-Patient Record Excerpt (Chunk {0} of {1}):
-{chunk}
-"""
-
-        def process_response_stream(prompt: str, history: List[dict]) -> Generator[dict, None, None]:
-            """Process a single prompt and stream the response"""
-            full_response = ""
-            for chunk_output in agent.run_gradio_chat(prompt, [], 0.2, 512, 2048, False, []):
-                if chunk_output is None:
-                    continue
-
-                if isinstance(chunk_output, list):
-                    for m in chunk_output:
-                        if hasattr(m, 'content') and m.content:
-                            cleaned = clean_response(m.content)
-                            if cleaned:
-                                full_response += cleaned + " "
-                                yield {"role": "assistant", "content": full_response}
-                elif isinstance(chunk_output, str) and chunk_output.strip():
-                    cleaned = clean_response(chunk_output)
-                    if cleaned:
-                        full_response += cleaned + " "
-                        yield {"role": "assistant", "content": full_response}
-
-            return full_response
-
-        def analyze(message: str, history: List[dict], files: List) -> Generator[dict, None, None]:
-            # Start with user message
-            history.append({"role": "user", "content": message})
-            yield {
-                "chatbot": history,
-                "download_output": None,
-                "final_summary": "",
-                "progress_text": {"value": "Starting analysis...", "visible": True}
-            }
-
-            extracted = []
-            file_hash_value = ""
-
-            if files:
-                # Process files in parallel
-                with ThreadPoolExecutor(max_workers=4) as executor:
-                    futures = []
-                    for f in files:
-                        file_type = f.name.split(".")[-1].lower()
-                        futures.append(executor.submit(process_file, f.name, file_type))
-
-                    for i, future in enumerate(as_completed(futures), 1):
-                        try:
-                            extracted.extend(future.result())
-                            yield {
-                                "progress_text": update_progress(i, len(files), "Processing files")
-                            }
-                        except Exception as e:
-                            logger.error(f"File processing error: {e}")
-                            extracted.append({"error": f"Error processing file: {str(e)}"})
-
-                file_hash_value = file_hash(files[0].name) if files else ""
-                history.append({"role": "assistant", "content": "✅ File processing complete"})
-                yield {
-                    "chatbot": history,
-                    "download_output": None,
-                    "final_summary": "",
-                    "progress_text": update_progress(len(files), len(files), "Files processed")
-                }
-
-            # Convert extracted data to JSON text
-            text_content = "\n".join(json.dumps(item) for item in extracted)
-
-            # Tokenize and chunk the content properly
-            chunks = tokenize_and_chunk(text_content)
-            combined_response = ""
-
-            try:
-                for chunk_idx, chunk in enumerate(chunks, 1):
-                    prompt = prompt_template.format(chunk_idx, len(chunks), chunk=chunk[:1800])
-
-                    # Create a placeholder message
-                    history.append({"role": "assistant", "content": ""})
-                    yield {
-                        "chatbot": history,
-                        "download_output": None,
-                        "final_summary": "",
-                        "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
-                    }
-
-                    # Process and stream the response
-                    chunk_response = ""
-                    for update in process_response_stream(prompt, history):
-                        # Update the last message with streaming content
-                        history[-1] = update
-                        chunk_response = update["content"]
-                        yield {
-                            "chatbot": history,
-                            "download_output": None,
-                            "final_summary": "",
-                            "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
-                        }
-
-                    combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
-
-                    # Clean up memory
-                    torch.cuda.empty_cache()
-                    gc.collect()
-
-                # Generate final summary
-                summary = summarize_findings(combined_response)
-                report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
-                if report_path:
-                    with open(report_path, "w", encoding="utf-8") as f:
-                        f.write(combined_response + "\n\n" + summary)
-
-                yield {
-                    "chatbot": history,
-                    "download_output": report_path if report_path and os.path.exists(report_path) else None,
-                    "final_summary": summary,
-                    "progress_text": {"visible": False}
-                }
-
-            except Exception as e:
-                logger.error("Analysis error: %s", e)
-                history.append({"role": "assistant", "content": f"❌ Error occurred: {str(e)}"})
-                yield {
-                    "chatbot": history,
-                    "download_output": None,
-                    "final_summary": f"Error occurred during analysis: {str(e)}",
-                    "progress_text": {"visible": False}
-                }
-
-        def clear_and_start():
-            return [
-                [], # chatbot
-                None, # download_output
-                "", # final_summary
-                "", # msg_input
-                None, # file_upload
-                {"visible": False} # progress_text
-            ]
-
         # Event handlers
         send_btn.click(
             analyze,
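For context on the parallel extraction step inside `analyze`: it follows the standard `ThreadPoolExecutor`/`as_completed` pattern, collecting each result and reporting progress as jobs finish. A self-contained sketch of that pattern, with `extract_one` as a hypothetical stand-in for the app's `process_file`:

from concurrent.futures import ThreadPoolExecutor, as_completed

def extract_one(path: str, file_type: str) -> list:
    # hypothetical stand-in for process_file(); returns a list of records
    return [{"file": path, "type": file_type}]

def extract_all(paths: list) -> list:
    extracted = []
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(extract_one, p, p.split(".")[-1].lower()) for p in paths]
        for i, future in enumerate(as_completed(futures), 1):
            try:
                extracted.extend(future.result())
            except Exception as e:
                extracted.append({"error": str(e)})
            print(f"Processing files - {i}/{len(futures)}")
    return extracted

print(extract_all(["notes.pdf", "labs.xlsx"]))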