Ali2206 committed on
Commit
b8ad099
Β·
verified Β·
1 Parent(s): a57b988

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -31
app.py CHANGED
@@ -3,7 +3,7 @@ import os
3
  import pandas as pd
4
  import json
5
  import gradio as gr
6
- from typing import List, Tuple, Dict, Any, Union
7
  import hashlib
8
  import shutil
9
  import re
@@ -105,46 +105,45 @@ def init_agent():
105
  agent.init_model()
106
  return agent
107
 
108
- def process_final_report(agent, file, chatbot_state: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], Union[str, None], str]:
109
- messages = chatbot_state if chatbot_state else []
110
  if file is None or not hasattr(file, "name"):
111
- return messages + [("assistant", "❌ Please upload a valid Excel file.")], None, ""
 
112
 
113
  messages.append(("user", f"πŸ“Ž Uploaded file: {os.path.basename(file.name)}"))
 
 
114
  text = extract_text_from_excel(file.name)
115
  chunks = split_text_into_chunks(text)
116
- chunk_responses = [None] * len(chunks)
117
 
118
- def analyze_chunk(i, chunk):
119
  prompt = build_prompt_from_text(chunk)
120
- response = ""
121
  for res in agent.run_gradio_chat(
122
  message=prompt, history=[], temperature=0.2,
123
  max_new_tokens=MAX_NEW_TOKENS, max_token=MAX_MODEL_TOKENS,
124
  call_agent=False, conversation=[]
125
  ):
126
  if isinstance(res, str):
127
- response += res
128
  elif hasattr(res, "content"):
129
- response += res.content
130
- elif isinstance(res, list):
131
- for r in res:
132
- if hasattr(r, "content"):
133
- response += r.content
134
- return i, clean_response(response)
135
-
136
- with ThreadPoolExecutor(max_workers=1) as executor:
137
- futures = [executor.submit(analyze_chunk, i, c) for i, c in enumerate(chunks)]
138
- for f in as_completed(futures):
139
- i, result = f.result()
140
- chunk_responses[i] = result
141
 
142
  valid = [r for r in chunk_responses if r and not r.startswith("❌")]
143
  if not valid:
144
- return messages + [("assistant", "❌ No valid results found in the file.")], None, ""
 
 
145
 
146
  summary_prompt = f"Summarize this analysis in a final structured report:\n\n" + "\n\n".join(valid)
147
- messages.append(("assistant", "⏳ Generating the final report..."))
 
148
 
149
  final_report = ""
150
  for res in agent.run_gradio_chat(
@@ -158,13 +157,12 @@ def process_final_report(agent, file, chatbot_state: List[Tuple[str, str]]) -> T
158
  final_report += res.content
159
 
160
  cleaned = clean_response(final_report)
161
- messages.append(("assistant", cleaned)) # βœ… Append answer to chat
162
-
163
  report_path = os.path.join(report_dir, f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
164
  with open(report_path, 'w') as f:
165
  f.write(f"# 🧠 Final Patient Report\n\n{cleaned}")
166
 
167
- return messages, report_path, cleaned
168
 
169
  def create_ui(agent):
170
  with gr.Blocks(css="""
@@ -213,14 +211,10 @@ Upload clinical Excel records below and click **Analyze** to generate a medical
213
  report_output = gr.File(label="Download Report", visible=False)
214
  chatbot_state = gr.State(value=[])
215
 
216
- def update_ui(file, current_state):
217
- messages, report_path, final_text = process_final_report(agent, file, current_state)
218
- return messages, gr.update(visible=report_path is not None, value=report_path), messages, gr.update(visible=True, value=final_text)
219
-
220
  analyze_btn.click(
221
- fn=update_ui,
222
  inputs=[file_upload, chatbot_state],
223
- outputs=[chatbot, report_output, chatbot_state, report_output_markdown]
224
  )
225
 
226
  return demo
 
3
  import pandas as pd
4
  import json
5
  import gradio as gr
6
+ from typing import List, Tuple, Union, Generator
7
  import hashlib
8
  import shutil
9
  import re
 
105
  agent.init_model()
106
  return agent
107
 
108
+ def stream_report(agent, file: gr.File, chatbot_state: List[Tuple[str, str]]) -> Generator:
109
+ messages = chatbot_state or []
110
  if file is None or not hasattr(file, "name"):
111
+ yield messages + [("assistant", "❌ Please upload a valid Excel file.")], None, ""
112
+ return
113
 
114
  messages.append(("user", f"πŸ“Ž Uploaded file: {os.path.basename(file.name)}"))
115
+ yield messages, None, ""
116
+
117
  text = extract_text_from_excel(file.name)
118
  chunks = split_text_into_chunks(text)
119
+ chunk_responses = []
120
 
121
+ for i, chunk in enumerate(chunks):
122
  prompt = build_prompt_from_text(chunk)
123
+ partial = ""
124
  for res in agent.run_gradio_chat(
125
  message=prompt, history=[], temperature=0.2,
126
  max_new_tokens=MAX_NEW_TOKENS, max_token=MAX_MODEL_TOKENS,
127
  call_agent=False, conversation=[]
128
  ):
129
  if isinstance(res, str):
130
+ partial += res
131
  elif hasattr(res, "content"):
132
+ partial += res.content
133
+ cleaned = clean_response(partial)
134
+ messages.append(("assistant", f"πŸ“„ Chunk {i+1}:\n\n{cleaned}"))
135
+ chunk_responses.append(cleaned)
136
+ yield messages, None, ""
 
 
 
 
 
 
 
137
 
138
  valid = [r for r in chunk_responses if r and not r.startswith("❌")]
139
  if not valid:
140
+ messages.append(("assistant", "❌ No valid results found in the file."))
141
+ yield messages, None, ""
142
+ return
143
 
144
  summary_prompt = f"Summarize this analysis in a final structured report:\n\n" + "\n\n".join(valid)
145
+ messages.append(("assistant", "πŸ“Š Generating final summary..."))
146
+ yield messages, None, ""
147
 
148
  final_report = ""
149
  for res in agent.run_gradio_chat(
 
157
  final_report += res.content
158
 
159
  cleaned = clean_response(final_report)
160
+ messages.append(("assistant", cleaned))
 
161
  report_path = os.path.join(report_dir, f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
162
  with open(report_path, 'w') as f:
163
  f.write(f"# 🧠 Final Patient Report\n\n{cleaned}")
164
 
165
+ yield messages, report_path, cleaned
166
 
167
  def create_ui(agent):
168
  with gr.Blocks(css="""
 
211
  report_output = gr.File(label="Download Report", visible=False)
212
  chatbot_state = gr.State(value=[])
213
 
 
 
 
 
214
  analyze_btn.click(
215
+ fn=stream_report,
216
  inputs=[file_upload, chatbot_state],
217
+ outputs=[chatbot, report_output, report_output_markdown]
218
  )
219
 
220
  return demo