Update app.py
app.py CHANGED
@@ -126,20 +126,63 @@ def init_agent():
     return agent
 
 
-def stream_final_report(agent, file) -> Generator[Tuple[List[Dict[str, str]], str], None, None]:
+def stream_final_report(agent, file) -> Generator[Tuple[List[Dict[str, str]], Union[str, None]], None, None]:
+    # Initialize with empty values
+    messages = []
+    report_path = None
+
     if file is None or not hasattr(file, "name"):
-
+        messages = [{"role": "assistant", "content": "❌ Please upload a valid Excel file before analyzing."}]
+        yield messages, None
         return
 
-
-
-
-
-
-
-
+    try:
+        # Initial processing message
+        messages = [{"role": "user", "content": f"Processing Excel file: {file.name}"},
+                    {"role": "assistant", "content": "⏳ Extracting and analyzing data..."}]
+        yield messages, None
+
+        extracted_text = extract_text_from_excel(file.name)
+        chunks = split_text_into_chunks(extracted_text)
+        chunk_responses = []
+
+        # Process each chunk
+        for i, chunk in enumerate(chunks):
+            messages.append({"role": "assistant", "content": f"🔍 Analyzing chunk {i+1}/{len(chunks)}..."})
+            yield messages, None
+
+            prompt = build_prompt_from_text(chunk)
+            response = ""
+            for result in agent.run_gradio_chat(
+                message=prompt,
+                history=[],
+                temperature=0.2,
+                max_new_tokens=MAX_NEW_TOKENS,
+                max_token=MAX_TOKENS,
+                call_agent=False,
+                conversation=[],
+            ):
+                if isinstance(result, str):
+                    response += result
+                elif hasattr(result, "content"):
+                    response += result.content
+                elif isinstance(result, list):
+                    for r in result:
+                        if hasattr(r, "content"):
+                            response += r.content
+
+            chunk_responses.append(clean_response(response))
+            messages.append({"role": "assistant", "content": f"✅ Chunk {i+1} analysis complete"})
+            yield messages, None
+
+        # Final summarization
+        final_prompt = "\n\n".join(chunk_responses) + "\n\nSummarize the key findings above."
+        messages.append({"role": "assistant", "content": "📝 Generating final report..."})
+        yield messages, None
+
+        stream_text = ""
         for result in agent.run_gradio_chat(
-            message=
+            message=final_prompt,
             history=[],
             temperature=0.2,
             max_new_tokens=MAX_NEW_TOKENS,
@@ -148,57 +191,84 @@ def stream_final_report(agent, file) -> Generator[Tuple[List[Dict[str, str]], st
             conversation=[],
         ):
             if isinstance(result, str):
-
+                stream_text += result
             elif hasattr(result, "content"):
-
+                stream_text += result.content
             elif isinstance(result, list):
                 for r in result:
                     if hasattr(r, "content"):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            for r in result:
-                if hasattr(r, "content"):
-                    stream_text += r.content
-    messages.append({"role": "assistant", "content": clean_response(stream_text)})
-    yield (messages, None)
-
-    final_report = f"# \U0001f9e0 Final Patient Report\n\n{clean_response(stream_text)}"
-    report_path = os.path.join(report_dir, f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
-    with open(report_path, 'w') as f:
-        f.write(final_report)
-
-    messages.append({"role": "assistant", "content": final_report})
-    yield (messages, report_path)
+                        stream_text += r.content
+
+        messages[-1]["content"] = f"📝 Generating final report...\n\n{clean_response(stream_text)}"
+        yield messages, None
+
+        # Save final report
+        final_report = f"# \U0001f9e0 Final Patient Report\n\n{clean_response(stream_text)}"
+        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+        report_path = os.path.join(report_dir, f"report_{timestamp}.md")
+
+        with open(report_path, 'w') as f:
+            f.write(final_report)
+
+        messages.append({"role": "assistant", "content": f"✅ Report generated and saved: report_{timestamp}.md"})
+        yield messages, report_path
+
+    except Exception as e:
+        messages.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})
+        yield messages, None
+
 
 def create_ui(agent):
-    with gr.Blocks(title="Patient History Chat") as demo:
-
-
-
-
+    with gr.Blocks(title="Patient History Chat", css=".gradio-container {max-width: 900px !important}") as demo:
+        gr.Markdown("## 🏥 Patient History Analysis Tool")
+
+        with gr.Row():
+            with gr.Column(scale=3):
+                chatbot = gr.Chatbot(
+                    label="Clinical Assistant",
+                    show_copy_button=True,
+                    height=600,
+                    avatar_images=(
+                        None,  # User avatar
+                        "https://i.imgur.com/6wX7Zb4.png"  # Bot avatar
+                    )
+                )
+            with gr.Column(scale=1):
+                file_upload = gr.File(
+                    label="Upload Excel File",
+                    file_types=[".xlsx"],
+                    height=100
+                )
+                analyze_btn = gr.Button(
+                    "🧠 Analyze Patient History",
+                    variant="primary"
+                )
+                report_output = gr.File(
+                    label="Download Report",
+                    visible=False,
+                    interactive=False
+                )
+                gr.Examples(
+                    examples=["sample_data/sample_patient_history.xlsx"],
+                    inputs=file_upload,
+                    label="Sample Files"
+                )
 
         analyze_btn.click(
             fn=lambda file: stream_final_report(agent, file),
             inputs=[file_upload],
-            outputs=[chatbot, report_output]
+            outputs=[chatbot, report_output],
+            api_name="analyze"
+        )
+
+        def show_report(report_path):
+            if report_path:
+                return gr.File(visible=True, value=report_path)
+            return gr.File(visible=False)
+
+        demo.load(
+            lambda: None,
+            outputs=report_output
        )
 
     return demo
@@ -212,7 +282,8 @@ if __name__ == "__main__":
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
-        allowed_paths=["/data/hf_cache/reports"]
+        allowed_paths=["/data/hf_cache/reports"],
+        share=False
    )
except Exception as e:
    print(f"Error: {str(e)}")
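
One detail worth noting about the new signature: it annotates the yielded tuple with `Union[str, None]`, so the typing imports at the top of app.py (outside the hunks shown here) must cover every name the annotation uses, e.g.:

```python
# Assumed import line at the top of app.py (not shown in this diff).
from typing import Dict, Generator, List, Tuple, Union
```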
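The rewritten `stream_final_report` also leans on four helpers defined elsewhere in app.py: `extract_text_from_excel`, `split_text_into_chunks`, `build_prompt_from_text`, and `clean_response`. A minimal sketch of what they might look like, assuming pandas-based extraction and simple character-count chunking (the names match the diff; the bodies are illustrative, not the Space's actual implementations):

```python
import re
import pandas as pd

def extract_text_from_excel(path: str) -> str:
    # Flatten every sheet of the workbook into plain text (assumed behavior).
    sheets = pd.read_excel(path, sheet_name=None)
    parts = []
    for name, df in sheets.items():
        parts.append(f"--- Sheet: {name} ---")
        parts.append(df.to_csv(index=False))
    return "\n".join(parts)

def split_text_into_chunks(text: str, max_chars: int = 6000) -> list:
    # Greedy fixed-width chunking; the real splitter may be token-aware.
    return [text[i:i + max_chars] for i in range(0, len(text), max_chars)]

def build_prompt_from_text(chunk: str) -> str:
    # Wrap a chunk in an analysis instruction (assumed wording).
    return f"Analyze this patient history excerpt and list key clinical findings:\n\n{chunk}"

def clean_response(text: str) -> str:
    # Strip any leftover tool-call markup from the model output (assumed cleanup).
    return re.sub(r"\[TOOL_CALLS\].*", "", text, flags=re.DOTALL).strip()
```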
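Because the function yields `(messages, report_path)` tuples and `analyze_btn.click` maps them onto `[chatbot, report_output]`, Gradio refreshes both components on every `yield`, which is what produces the live progress messages. The contract can be exercised outside the UI with stub objects (hypothetical test scaffolding, assuming the sample workbook exists):

```python
class StubAgent:
    # Hypothetical stand-in mimicking the agent's streaming run_gradio_chat interface.
    def run_gradio_chat(self, message, history, temperature, max_new_tokens,
                        max_token, call_agent, conversation):
        yield "Finding: "
        yield "unremarkable."

class StubFile:
    # Hypothetical stand-in for the object Gradio passes from gr.File.
    name = "sample_data/sample_patient_history.xlsx"

# Each yield corresponds to one UI refresh: a message list plus an optional report path.
for messages, report_path in stream_final_report(StubAgent(), StubFile()):
    print(f"{len(messages)} messages, report: {report_path}")
```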
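One loose end in this commit: `show_report` is defined inside `create_ui` but never attached to an event, so `report_output` stays hidden (`visible=False`) even after a report path is yielded into it. If the intent is to reveal the download once a report exists, one way to wire it would be to chain it after the click event (a sketch, assuming Gradio's `.then()` event chaining):

```python
analyze_btn.click(
    fn=lambda file: stream_final_report(agent, file),
    inputs=[file_upload],
    outputs=[chatbot, report_output],
    api_name="analyze",
).then(
    fn=show_report,        # flips visible=True when a path is present
    inputs=report_output,  # current value of the file component
    outputs=report_output,
)
```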