Ali2206 committed on
Commit
6f1a22c
Β·
verified Β·
1 Parent(s): b8ad099

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -62
app.py CHANGED
@@ -44,7 +44,10 @@ def estimate_tokens(text: str) -> int:
44
 
45
  def extract_text_from_excel(file_path: str) -> str:
46
  all_text = []
47
- xls = pd.ExcelFile(file_path)
 
 
 
48
  for sheet_name in xls.sheet_names:
49
  df = xls.parse(sheet_name).astype(str).fillna("")
50
  rows = df.apply(lambda row: " | ".join([cell for cell in row if cell.strip()]), axis=1)
@@ -105,64 +108,55 @@ def init_agent():
105
  agent.init_model()
106
  return agent
107
 
108
def stream_report(agent, file: gr.File, chatbot_state: List[Tuple[str, str]]) -> Generator:
    """Stream a chunked clinical-report analysis into the chatbot.

    Yields (messages, report_path, report_text) triples after each step so the
    Gradio UI updates progressively; report_path/report_text stay empty until
    the final summary is written to disk.
    """
    messages = chatbot_state or []

    # Guard clause: nothing to analyze without a real upload.
    if file is None or not hasattr(file, "name"):
        yield messages + [("assistant", "❌ Please upload a valid Excel file.")], None, ""
        return

    def _drain(prompt: str) -> str:
        # Run one agent call and concatenate its streamed pieces
        # (plain strings or objects carrying a .content attribute).
        buffer = ""
        for piece in agent.run_gradio_chat(
            message=prompt, history=[], temperature=0.2,
            max_new_tokens=MAX_NEW_TOKENS, max_token=MAX_MODEL_TOKENS,
            call_agent=False, conversation=[]
        ):
            if isinstance(piece, str):
                buffer += piece
            elif hasattr(piece, "content"):
                buffer += piece.content
        return buffer

    messages.append(("user", f"📎 Uploaded file: {os.path.basename(file.name)}"))
    yield messages, None, ""

    chunks = split_text_into_chunks(extract_text_from_excel(file.name))

    chunk_responses = []
    for idx, chunk in enumerate(chunks):
        tidy = clean_response(_drain(build_prompt_from_text(chunk)))
        messages.append(("assistant", f"📄 Chunk {idx+1}:\n\n{tidy}"))
        chunk_responses.append(tidy)
        yield messages, None, ""

    # Keep only chunks that produced a non-empty, non-error answer.
    valid = [resp for resp in chunk_responses if resp and not resp.startswith("❌")]
    if not valid:
        messages.append(("assistant", "❌ No valid results found in the file."))
        yield messages, None, ""
        return

    summary_prompt = f"Summarize this analysis in a final structured report:\n\n" + "\n\n".join(valid)
    messages.append(("assistant", "📊 Generating final summary..."))
    yield messages, None, ""

    tidy = clean_response(_drain(summary_prompt))
    messages.append(("assistant", tidy))

    report_path = os.path.join(report_dir, f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
    with open(report_path, 'w') as f:
        f.write(f"# 🧠 Final Patient Report\n\n{tidy}")

    yield messages, report_path, tidy
166
 
167
  def create_ui(agent):
168
  with gr.Blocks(css="""
@@ -180,11 +174,11 @@ def create_ui(agent):
180
  border-radius: 0;
181
  background-color: #1a1f2e;
182
  }
183
- .chatbot {
184
  background-color: #131720;
185
  border-radius: 12px;
186
  padding: 20px;
187
- height: 600px;
188
  overflow-y: auto;
189
  border: 1px solid #2c3344;
190
  }
@@ -204,17 +198,16 @@ def create_ui(agent):
204
  gr.Markdown("""# 🧠 Clinical Reasoning Assistant
205
  Upload clinical Excel records below and click **Analyze** to generate a medical summary.
206
  """)
207
- chatbot = gr.Chatbot(label="Chatbot", elem_classes="chatbot", type="tuples")
208
- report_output_markdown = gr.Markdown(visible=False)
209
  file_upload = gr.File(label="Upload Excel File", file_types=[".xlsx"])
210
  analyze_btn = gr.Button("Analyze")
211
- report_output = gr.File(label="Download Report", visible=False)
212
- chatbot_state = gr.State(value=[])
 
213
 
214
  analyze_btn.click(
215
  fn=stream_report,
216
- inputs=[file_upload, chatbot_state],
217
- outputs=[chatbot, report_output, report_output_markdown]
218
  )
219
 
220
  return demo
@@ -223,7 +216,7 @@ if __name__ == "__main__":
223
  try:
224
  agent = init_agent()
225
  demo = create_ui(agent)
226
- demo.launch(server_name="0.0.0.0", server_port=7860, allowed_paths=["/data/hf_cache/reports"], share=False)
227
  except Exception as e:
228
  print(f"Error: {str(e)}")
229
  sys.exit(1)
 
44
 
45
  def extract_text_from_excel(file_path: str) -> str:
46
  all_text = []
47
+ try:
48
+ xls = pd.ExcelFile(file_path)
49
+ except Exception as e:
50
+ raise ValueError(f"❌ Error reading Excel file: {e}")
51
  for sheet_name in xls.sheet_names:
52
  df = xls.parse(sheet_name).astype(str).fillna("")
53
  rows = df.apply(lambda row: " | ".join([cell for cell in row if cell.strip()]), axis=1)
 
108
  agent.init_model()
109
  return agent
110
 
111
def stream_report(agent, file: gr.File, full_output: str) -> Generator:
    """Analyze an uploaded Excel file chunk by chunk and stream markdown output.

    Args:
        agent: model wrapper exposing ``run_gradio_chat`` (streams str pieces
            or objects with a ``.content`` attribute).
        file: Gradio upload (object with ``.name``) or a plain path string.
        full_output: current accumulated-output state from the UI (unused as
            input; present to match the Gradio wiring).

    Yields:
        (markdown_text, report_file_path_or_None, final_text) triples; the
        report path and final text are only filled on the last yield.
    """
    accumulated_text = ""
    try:
        if file is None:
            yield "❌ Please upload a valid Excel file.", None, ""
            return

        # Accept either a gr.File-like object or a raw filesystem path.
        filepath = file.name if hasattr(file, "name") else file
        text = extract_text_from_excel(filepath)
        chunks = split_text_into_chunks(text)

        def _collect(prompt: str) -> str:
            # One agent call; concatenate the streamed fragments into a string.
            out = ""
            for res in agent.run_gradio_chat(
                message=prompt, history=[], temperature=0.2,
                max_new_tokens=MAX_NEW_TOKENS, max_token=MAX_MODEL_TOKENS,
                call_agent=False, conversation=[]
            ):
                if isinstance(res, str):
                    out += res
                elif hasattr(res, "content"):
                    out += res.content
            return out

        for i, chunk in enumerate(chunks):
            cleaned = clean_response(_collect(build_prompt_from_text(chunk)))
            accumulated_text += f"\n\n📄 **Chunk {i+1}**:\n{cleaned}"
            yield accumulated_text, None, ""

        # NOTE(review): the summary prompt includes the decorative chunk
        # headers and any "❌" chunk failures — the previous revision filtered
        # failed chunks out first; confirm whether that filtering should be
        # restored before relying on the summary quality.
        summary_prompt = "Summarize this analysis in a final structured report:\n\n" + accumulated_text
        cleaned = clean_response(_collect(summary_prompt))
        accumulated_text += f"\n\n📊 **Final Summary**:\n{cleaned}"

        report_path = os.path.join(report_dir, f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
        # Explicit UTF-8: the report contains emoji and must not depend on the
        # platform's default locale encoding.
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(f"# 🧠 Final Patient Report\n\n{cleaned}")

        yield accumulated_text, report_path, cleaned

    except Exception as e:
        # Top-level UI boundary: surface the failure to the user instead of
        # crashing the Gradio event handler.
        yield f"❌ Error: {str(e)}", None, ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
 
161
  def create_ui(agent):
162
  with gr.Blocks(css="""
 
174
  border-radius: 0;
175
  background-color: #1a1f2e;
176
  }
177
+ .output-markdown {
178
  background-color: #131720;
179
  border-radius: 12px;
180
  padding: 20px;
181
+ min-height: 600px;
182
  overflow-y: auto;
183
  border: 1px solid #2c3344;
184
  }
 
198
  gr.Markdown("""# 🧠 Clinical Reasoning Assistant
199
  Upload clinical Excel records below and click **Analyze** to generate a medical summary.
200
  """)
 
 
201
  file_upload = gr.File(label="Upload Excel File", file_types=[".xlsx"])
202
  analyze_btn = gr.Button("Analyze")
203
+ report_output_markdown = gr.Markdown(elem_classes="output-markdown")
204
+ report_file = gr.File(label="Download Report", visible=False)
205
+ full_output = gr.State(value="")
206
 
207
  analyze_btn.click(
208
  fn=stream_report,
209
+ inputs=[file_upload, full_output],
210
+ outputs=[report_output_markdown, report_file, full_output]
211
  )
212
 
213
  return demo
 
216
  try:
217
  agent = init_agent()
218
  demo = create_ui(agent)
219
+ demo.launch(server_name="0.0.0.0", server_port=7860, allowed_paths=["/data/hf_cache/reports"], share=True)
220
  except Exception as e:
221
  print(f"Error: {str(e)}")
222
  sys.exit(1)