Ali2206 committed
Commit ba63eca · verified · 1 Parent(s): 65e7d58

Update app.py

Files changed (1)
  1. app.py +69 -450
app.py CHANGED
@@ -1,309 +1,33 @@
- import sys
- import os
- import pandas as pd
- import pdfplumber
- import json
- import gradio as gr
- from typing import List, Dict, Optional, Generator, Any
- from concurrent.futures import ThreadPoolExecutor, as_completed
- import hashlib
- import shutil
- import re
- import psutil
- import subprocess
- import logging
- import torch
- import gc
- from diskcache import Cache
- import time
- from transformers import AutoTokenizer
-
- # Configure logging
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- # Persistent directory
- persistent_dir = "/data/hf_cache"
- os.makedirs(persistent_dir, exist_ok=True)
-
- model_cache_dir = os.path.join(persistent_dir, "txagent_models")
- tool_cache_dir = os.path.join(persistent_dir, "tool_cache")
- file_cache_dir = os.path.join(persistent_dir, "cache")
- report_dir = os.path.join(persistent_dir, "reports")
- vllm_cache_dir = os.path.join(persistent_dir, "vllm_cache")
-
- for directory in [model_cache_dir, tool_cache_dir, file_cache_dir, report_dir, vllm_cache_dir]:
-     os.makedirs(directory, exist_ok=True)
-
- os.environ["HF_HOME"] = model_cache_dir
- os.environ["TRANSFORMERS_CACHE"] = model_cache_dir
- os.environ["VLLM_CACHE_DIR"] = vllm_cache_dir
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
- os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
-
- current_dir = os.path.dirname(os.path.abspath(__file__))
- src_path = os.path.abspath(os.path.join(current_dir, "src"))
- sys.path.insert(0, src_path)
-
- from txagent.txagent import TxAgent
-
- # Initialize cache with 10GB limit
- cache = Cache(file_cache_dir, size_limit=10 * 1024**3)
-
- # Initialize tokenizer for precise chunking
- tokenizer = AutoTokenizer.from_pretrained("mims-harvard/TxAgent-T1-Llama-3.1-8B")
-
- def sanitize_utf8(text: str) -> str:
-     return text.encode("utf-8", "ignore").decode("utf-8")
-
- def file_hash(path: str) -> str:
-     with open(path, "rb") as f:
-         return hashlib.md5(f.read()).hexdigest()
-
- def extract_all_pages(file_path: str, progress_callback=None) -> str:
-     try:
-         with pdfplumber.open(file_path) as pdf:
-             total_pages = len(pdf.pages)
-             if total_pages == 0:
-                 return ""
-
-         batch_size = 10
-         batches = [(i, min(i + batch_size, total_pages)) for i in range(0, total_pages, batch_size)]
-         text_chunks = [""] * total_pages
-         processed_pages = 0
-
-         def extract_batch(start: int, end: int) -> List[tuple]:
-             results = []
-             with pdfplumber.open(file_path) as pdf:
-                 for page in pdf.pages[start:end]:
-                     page_num = start + pdf.pages.index(page)
-                     page_text = page.extract_text() or ""
-                     results.append((page_num, f"=== Page {page_num + 1} ===\n{page_text.strip()}"))
-             return results
-
-         with ThreadPoolExecutor(max_workers=6) as executor:
-             futures = [executor.submit(extract_batch, start, end) for start, end in batches]
-             for future in as_completed(futures):
-                 for page_num, text in future.result():
-                     text_chunks[page_num] = text
-                 processed_pages += batch_size
-                 if progress_callback:
-                     progress_callback(min(processed_pages, total_pages), total_pages)
-
-         return "\n\n".join(filter(None, text_chunks))
-     except Exception as e:
-         logger.error("PDF processing error: %s", e)
-         return f"PDF processing error: {str(e)}"
-
- def excel_to_json(file_path: str) -> List[Dict]:
-     try:
-         try:
-             df = pd.read_excel(file_path, engine='openpyxl', header=None, dtype=str)
-         except Exception:
-             df = pd.read_excel(file_path, engine='xlrd', header=None, dtype=str)
-
-         content = df.where(pd.notnull(df), "").astype(str).values.tolist()
-
-         return [{
-             "filename": os.path.basename(file_path),
-             "rows": content,
-             "type": "excel"
-         }]
-     except Exception as e:
-         logger.error(f"Error processing Excel file: {e}")
-         return [{"error": f"Error processing Excel file: {str(e)}"}]
-
- def csv_to_json(file_path: str) -> List[Dict]:
-     try:
-         chunks = []
-         for chunk in pd.read_csv(
-             file_path,
-             header=None,
-             dtype=str,
-             encoding_errors='replace',
-             on_bad_lines='skip',
-             chunksize=10000
-         ):
-             chunks.append(chunk)
-
-         df = pd.concat(chunks) if chunks else pd.DataFrame()
-         content = df.where(pd.notnull(df), "").astype(str).values.tolist()
-
-         return [{
-             "filename": os.path.basename(file_path),
-             "rows": content,
-             "type": "csv"
-         }]
-     except Exception as e:
-         logger.error(f"Error processing CSV file: {e}")
-         return [{"error": f"Error processing CSV file: {str(e)}"}]
-
- def process_file(file_path: str, file_type: str) -> List[Dict]:
-     try:
-         if file_type == "pdf":
-             text = extract_all_pages(file_path)
-             return [{
-                 "filename": os.path.basename(file_path),
-                 "content": text,
-                 "status": "initial",
-                 "type": "pdf"
-             }]
-         elif file_type in ["xls", "xlsx"]:
-             return excel_to_json(file_path)
-         elif file_type == "csv":
-             return csv_to_json(file_path)
-         else:
-             return [{"error": f"Unsupported file type: {file_type}"}]
-     except Exception as e:
-         logger.error("Error processing %s: %s", os.path.basename(file_path), e)
-         return [{"error": f"Error processing {os.path.basename(file_path)}: {str(e)}"}]
-
- def tokenize_and_chunk(text: str, max_tokens: int = 1800) -> List[str]:
-     tokens = tokenizer.encode(text)
-     chunks = []
-     for i in range(0, len(tokens), max_tokens):
-         chunk_tokens = tokens[i:i + max_tokens]
-         chunks.append(tokenizer.decode(chunk_tokens))
-     return chunks
-
- def log_system_usage(tag=""):
-     try:
-         cpu = psutil.cpu_percent(interval=1)
-         mem = psutil.virtual_memory()
-         logger.info("[%s] CPU: %.1f%% | RAM: %dMB / %dMB", tag, cpu, mem.used // (1024**2), mem.total // (1024**2))
-         result = subprocess.run(
-             ["nvidia-smi", "--query-gpu=memory.used,memory.total,utilization.gpu", "--format=csv,nounits,noheader"],
-             capture_output=True, text=True
-         )
-         if result.returncode == 0:
-             used, total, util = result.stdout.strip().split(", ")
-             logger.info("[%s] GPU: %sMB / %sMB | Utilization: %s%%", tag, used, total, util)
-     except Exception as e:
-         logger.error("[%s] GPU/CPU monitor failed: %s", tag, e)
-
- def clean_response(text: str) -> str:
-     text = sanitize_utf8(text)
-     text = re.sub(r"\[.*?\]|\bNone\b|To analyze the patient record excerpt.*?medications\.|Since the previous attempts.*?\.|I need to.*?medications\.|Retrieving tools.*?\.", "", text, flags=re.DOTALL)
-     diagnoses = []
-     lines = text.splitlines()
-     in_diagnoses_section = False
-     for line in lines:
-         line = line.strip()
-         if not line:
-             continue
-         if re.match(r"###\s*Missed Diagnoses", line):
-             in_diagnoses_section = True
-             continue
-         if re.match(r"###\s*(Medication Conflicts|Incomplete Assessments|Urgent Follow-up)", line):
-             in_diagnoses_section = False
-             continue
-         if in_diagnoses_section and re.match(r"-\s*.+", line):
-             diagnosis = re.sub(r"^\-\s*", "", line).strip()
-             if diagnosis and not re.match(r"No issues identified", diagnosis, re.IGNORECASE):
-                 diagnoses.append(diagnosis)
-     text = " ".join(diagnoses)
-     text = re.sub(r"\s+", " ", text).strip()
-     text = re.sub(r"[^\w\s\.\,\(\)\-]", "", text)
-     return text if text else ""
-
- def summarize_findings(combined_response: str) -> str:
-     chunks = combined_response.split("--- Analysis for Chunk")
-     diagnoses = []
-     for chunk in chunks:
-         chunk = chunk.strip()
-         if not chunk or "No oversights identified" in chunk:
-             continue
-         lines = chunk.splitlines()
-         in_diagnoses_section = False
-         for line in lines:
-             line = line.strip()
-             if not line:
-                 continue
-             if re.match(r"###\s*Missed Diagnoses", line):
-                 in_diagnoses_section = True
-                 continue
-             if re.match(r"###\s*(Medication Conflicts|Incomplete Assessments|Urgent Follow-up)", line):
-                 in_diagnoses_section = False
-                 continue
-             if in_diagnoses_section and re.match(r"-\s*.+", line):
-                 diagnosis = re.sub(r"^\-\s*", "", line).strip()
-                 if diagnosis and not re.match(r"No issues identified", diagnosis, re.IGNORECASE):
-                     diagnoses.append(diagnosis)
-
-     seen = set()
-     unique_diagnoses = [d for d in diagnoses if not (d in seen or seen.add(d))]
-
-     if not unique_diagnoses:
-         return "No missed diagnoses were identified in the provided records."
-
-     summary = "Missed diagnoses include " + ", ".join(unique_diagnoses[:-1])
-     if len(unique_diagnoses) > 1:
-         summary += f", and {unique_diagnoses[-1]}"
-     elif len(unique_diagnoses) == 1:
-         summary = "Missed diagnoses include " + unique_diagnoses[0]
-     summary += ", all of which require urgent clinical review to prevent potential adverse outcomes."
-
-     return summary.strip()
-
- def update_progress(current: int, total: int, stage: str = "") -> Dict[str, Any]:
-     progress = f"{stage} - {current}/{total}" if stage else f"{current}/{total}"
-     return {"value": progress, "visible": True, "label": f"Progress: {progress}"}
-
- def init_agent():
-     logger.info("Initializing model...")
-     log_system_usage("Before Load")
-     default_tool_path = os.path.abspath("data/new_tool.json")
-     target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
-     if not os.path.exists(target_tool_path):
-         shutil.copy(default_tool_path, target_tool_path)
-
-     agent = TxAgent(
-         model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
-         rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
-         tool_files_dict={"new_tool": target_tool_path},
-         force_finish=True,
-         enable_checker=False,
-         step_rag_num=4,
-         seed=100,
-         additional_default_tools=[],
-     )
-     agent.init_model()
-     log_system_usage("After Load")
-     logger.info("Agent Ready")
-     return agent
-
- def process_response_stream(prompt: str, history: List[dict]) -> Generator[dict, None, None]:
-     full_response = ""
-     for chunk_output in agent.run_gradio_chat(prompt, [], 0.2, 512, 2048, False, []):
-         if chunk_output is None:
-             continue
-
-         if isinstance(chunk_output, list):
-             for m in chunk_output:
-                 if hasattr(m, 'content') and m.content:
-                     cleaned = clean_response(m.content)
-                     if cleaned:
-                         full_response += cleaned + " "
-                         yield {"role": "assistant", "content": full_response}
-         elif isinstance(chunk_output, str) and chunk_output.strip():
-             cleaned = clean_response(chunk_output)
-             if cleaned:
-                 full_response += cleaned + " "
-                 yield {"role": "assistant", "content": full_response}
-
-     return full_response
-
- def analyze(message: str, history: List[dict], files: List) -> Generator[tuple, None, None]:
-     # Initialize outputs
-     chatbot_output = history.copy()
-     download_output = None
-     final_summary = ""
-     progress_text = {"value": "Starting analysis...", "visible": True}

      try:
-         # Start with user message
-         chatbot_output.append({"role": "user", "content": message})
-         yield (chatbot_output, download_output, final_summary, progress_text)

          extracted = []
          file_hash_value = ""
@@ -319,175 +43,70 @@ def analyze(message: str, history: List[dict], files: List) -> Generator[tuple,
          for i, future in enumerate(as_completed(futures), 1):
              try:
                  extracted.extend(future.result())
-                 progress_text = update_progress(i, len(files), "Processing files")
-                 yield (chatbot_output, download_output, final_summary, progress_text)
              except Exception as e:
                  logger.error(f"File processing error: {e}")
                  extracted.append({"error": f"Error processing file: {str(e)}"})

          file_hash_value = file_hash(files[0].name) if files else ""
-         chatbot_output.append({"role": "assistant", "content": "✅ File processing complete"})
-         progress_text = update_progress(len(files), len(files), "Files processed")
-         yield (chatbot_output, download_output, final_summary, progress_text)
-
-         # Convert extracted data to JSON text
          text_content = "\n".join(json.dumps(item) for item in extracted)
-
-         # Tokenize and chunk the content properly
          chunks = tokenize_and_chunk(text_content)
          combined_response = ""

          for chunk_idx, chunk in enumerate(chunks, 1):
-             prompt = f"""
- Analyze the patient record excerpt for missed diagnoses only. Provide a concise, evidence-based summary as a single paragraph without headings or bullet points. Include specific clinical findings (e.g., 'elevated blood pressure (160/95) on page 10'), their potential implications (e.g., 'may indicate untreated hypertension'), and a recommendation for urgent review. Do not include other oversight categories like medication conflicts. If no missed diagnoses are found, state 'No missed diagnoses identified' in a single sentence.
-
- Patient Record Excerpt (Chunk {chunk_idx} of {len(chunks)}):
- {chunk[:1800]}
- """

-             # Create a placeholder message
-             chatbot_output.append({"role": "assistant", "content": ""})
-             progress_text = update_progress(chunk_idx, len(chunks), "Analyzing")
-             yield (chatbot_output, download_output, final_summary, progress_text)

-             # Process and stream the response
              chunk_response = ""
-             for update in process_response_stream(prompt, chatbot_output):
-                 chatbot_output[-1] = update
                  chunk_response = update["content"]
-                 progress_text = update_progress(chunk_idx, len(chunks), "Analyzing")
-                 yield (chatbot_output, download_output, final_summary, progress_text)

              combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
-
-             # Clean up memory
              torch.cuda.empty_cache()
              gc.collect()

-         # Generate final summary
-         final_summary = summarize_findings(combined_response)
          report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
          if report_path:
              with open(report_path, "w", encoding="utf-8") as f:
-                 f.write(combined_response + "\n\n" + final_summary)

-         download_output = report_path if report_path and os.path.exists(report_path) else None
-         progress_text = {"visible": False}
-         yield (chatbot_output, download_output, final_summary, progress_text)

      except Exception as e:
          logger.error("Analysis error: %s", e)
-         chatbot_output.append({"role": "assistant", "content": f"❌ Error occurred: {str(e)}"})
-         final_summary = f"Error occurred during analysis: {str(e)}"
-         progress_text = {"visible": False}
-         yield (chatbot_output, download_output, final_summary, progress_text)
-
- def clear_and_start():
-     return [
-         [], # chatbot
-         None, # download_output
-         "", # final_summary
-         "", # msg_input
-         None, # file_upload
-         {"visible": False} # progress_text
-     ]
-
- def create_ui(agent):
-     with gr.Blocks(theme=gr.themes.Soft(), title="Clinical Oversight Assistant") as demo:
-         gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
-
-         with gr.Row():
-             with gr.Column(scale=3):
-                 chatbot = gr.Chatbot(
-                     label="Analysis Conversation",
-                     height=600,
-                     show_copy_button=True,
-                     avatar_images=(
-                         "assets/user.png",
-                         "assets/assistant.png"
-                     ) if os.path.exists("assets/user.png") else None,
-                     type="messages", # Use openai-style messages
-                     render=False
-                 )
-             with gr.Column(scale=1):
-                 final_summary = gr.Markdown(
-                     label="Summary of Findings",
-                     value="### Summary will appear here\nAfter analysis completes"
-                 )
-                 download_output = gr.File(
-                     label="Download Full Report",
-                     visible=False
-                 )
-
-         with gr.Row():
-             file_upload = gr.File(
-                 file_types=[".pdf", ".csv", ".xls", ".xlsx"],
-                 file_count="multiple",
-                 label="Upload Patient Records"
-             )
-
-         with gr.Row():
-             msg_input = gr.Textbox(
-                 placeholder="Ask about potential oversights...",
-                 show_label=False,
-                 container=False,
-                 scale=7,
-                 autofocus=True
-             )
-             send_btn = gr.Button(
-                 "Analyze",
-                 variant="primary",
-                 scale=1,
-                 min_width=100
-             )
-
-         progress_text = gr.Textbox(
-             label="Progress",
-             visible=False,
-             interactive=False
-         )
-
-         # Event handlers
-         send_btn.click(
-             analyze,
-             inputs=[msg_input, chatbot, file_upload],
-             outputs=[chatbot, download_output, final_summary, progress_text],
-             show_progress="hidden"
-         )
-
-         msg_input.submit(
-             analyze,
-             inputs=[msg_input, chatbot, file_upload],
-             outputs=[chatbot, download_output, final_summary, progress_text],
-             show_progress="hidden"
-         )
-
-         demo.load(
-             clear_and_start,
-             outputs=[chatbot, download_output, final_summary, msg_input, file_upload, progress_text],
-             queue=False
-         )
-
-     return demo
-
- if __name__ == "__main__":
-     try:
-         logger.info("Launching app...")
-         agent = init_agent()
-         demo = create_ui(agent)
-         demo.queue(
-             api_open=False,
-             max_size=20
-         ).launch(
-             server_name="0.0.0.0",
-             server_port=7860,
-             show_error=True,
-             allowed_paths=[report_dir],
-             share=False
-         )
-     except Exception as e:
-         logger.error(f"Failed to launch app: {e}")
-         raise
-     finally:
-         if torch.distributed.is_initialized():
-             torch.distributed.destroy_process_group()

+ # Update the Chatbot component in create_ui() to use the new message format:
+ chatbot = gr.Chatbot(
+     label="Analysis Conversation",
+     height=600,
+     show_copy_button=True,
+     avatar_images=(
+         "assets/user.png",
+         "assets/assistant.png"
+     ) if os.path.exists("assets/user.png") else None,
+     render=False,
+     bubble_full_width=False,
+     type="messages" # Add this line to use the new format
+ )
+
+ # Update the analyze function to properly return all outputs:
+ def analyze(message: str, history: List[dict], files: List) -> Generator[Dict[str, Any], None, None]:
+     # Initialize all outputs
+     outputs = {
+         "chatbot": history.copy(),
+         "download_output": None,
+         "final_summary": "",
+         "progress_text": {"value": "Starting analysis...", "visible": True}
+     }
+     yield outputs # First yield with all outputs
      try:
+         # Add user message to history
+         history.append({"role": "user", "content": message})
+         outputs["chatbot"] = history
+         yield outputs

          extracted = []
          file_hash_value = ""
  for i, future in enumerate(as_completed(futures), 1):
44
  try:
45
  extracted.extend(future.result())
46
+ outputs["progress_text"] = update_progress(i, len(files), "Processing files")
47
+ yield outputs
48
  except Exception as e:
49
  logger.error(f"File processing error: {e}")
50
  extracted.append({"error": f"Error processing file: {str(e)}"})
51
 
52
  file_hash_value = file_hash(files[0].name) if files else ""
53
+ history.append({"role": "assistant", "content": "✅ File processing complete"})
54
+ outputs.update({
55
+ "chatbot": history,
56
+ "progress_text": update_progress(len(files), len(files), "Files processed")
57
+ })
58
+ yield outputs
59
+
60
+ # Process content and generate responses
61
  text_content = "\n".join(json.dumps(item) for item in extracted)
 
 
62
  chunks = tokenize_and_chunk(text_content)
63
  combined_response = ""
64
 
65
  for chunk_idx, chunk in enumerate(chunks, 1):
66
+ prompt = f"""Analyze this patient record for missed diagnoses...""" # Your prompt here
 
 
 
 
 
67
 
68
+ history.append({"role": "assistant", "content": ""})
69
+ outputs.update({
70
+ "chatbot": history,
71
+ "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
72
+ })
73
+ yield outputs
74
 
75
+ # Process response stream
76
  chunk_response = ""
77
+ for update in process_response_stream(prompt, history):
78
+ history[-1] = update
79
  chunk_response = update["content"]
80
+ outputs.update({
81
+ "chatbot": history,
82
+ "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
83
+ })
84
+ yield outputs
85
 
86
  combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
 
 
87
  torch.cuda.empty_cache()
88
  gc.collect()
89
 
90
+ # Final outputs
91
+ summary = summarize_findings(combined_response)
92
  report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
93
  if report_path:
94
  with open(report_path, "w", encoding="utf-8") as f:
95
+ f.write(combined_response + "\n\n" + summary)
96
 
97
+ outputs.update({
98
+ "download_output": report_path if report_path else None,
99
+ "final_summary": summary,
100
+ "progress_text": {"visible": False}
101
+ })
102
+ yield outputs
103
 
104
  except Exception as e:
105
  logger.error("Analysis error: %s", e)
106
+ history.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})
107
+ outputs.update({
108
+ "chatbot": history,
109
+ "final_summary": f"Error occurred: {str(e)}",
110
+ "progress_text": {"visible": False}
111
+ })
112
+ yield outputs
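
Below is a minimal, self-contained sketch (not part of the commit) of how a messages-format gr.Chatbot pairs with a streaming generator handler in recent Gradio releases. The component names, the toy analyze() body, and the word-by-word streaming are illustrative assumptions, not code from app.py.

# Illustrative sketch only: assumes a Gradio version that supports gr.Chatbot(type="messages").
import time
import gradio as gr

def analyze(message, history):
    # With type="messages", history is a list of {"role": ..., "content": ...} dicts.
    history = history + [{"role": "user", "content": message}]
    yield history, ""  # echo the user turn immediately
    partial = ""
    for word in ["Reviewing", "records", "for", "missed", "diagnoses..."]:
        partial += word + " "
        time.sleep(0.1)
        # Each yield supplies every declared output; Gradio streams them in order.
        yield history + [{"role": "assistant", "content": partial}], partial

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", label="Analysis Conversation")
    summary = gr.Markdown()
    msg = gr.Textbox(placeholder="Ask about potential oversights...")
    msg.submit(analyze, inputs=[msg, chatbot], outputs=[chatbot, summary])

if __name__ == "__main__":
    demo.launch()

A handler can also yield a dict keyed by the output component objects themselves (for example {chatbot: history, summary: partial}) rather than a positional tuple, which is convenient when only some outputs change on a given step.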