Update app.py
app.py CHANGED
@@ -32,7 +32,6 @@ sys.path.insert(0, src_path)
 
 from txagent.txagent import TxAgent
 
-# Constants
 MAX_MODEL_TOKENS = 32768
 MAX_CHUNK_TOKENS = 8192
 MAX_NEW_TOKENS = 2048
@@ -68,7 +67,7 @@ def extract_text_from_excel(file_path: str) -> str:
 def split_text_into_chunks(text: str, max_tokens: int = MAX_CHUNK_TOKENS) -> List[str]:
     effective_max_tokens = max_tokens - PROMPT_OVERHEAD
     if effective_max_tokens <= 0:
-        raise ValueError(
+        raise ValueError("Effective max tokens must be positive.")
     lines = text.split("\n")
     chunks, current_chunk, current_tokens = [], [], 0
     for line in lines:
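Note on this hunk: the `raise` fixed here guards the chunking loop that follows it. A minimal, runnable sketch of that pattern (hedged: `estimate_tokens` is stubbed with a crude chars-per-token heuristic, and the `PROMPT_OVERHEAD` value below is illustrative, not taken from the commit):

```python
from typing import List

PROMPT_OVERHEAD = 500      # illustrative reserve for the prompt template
MAX_CHUNK_TOKENS = 8192

def estimate_tokens(text: str) -> int:
    # Stand-in for the app's estimator: ~4 characters per token.
    return len(text) // 4 + 1

def split_text_into_chunks(text: str, max_tokens: int = MAX_CHUNK_TOKENS) -> List[str]:
    effective_max_tokens = max_tokens - PROMPT_OVERHEAD
    if effective_max_tokens <= 0:
        raise ValueError("Effective max tokens must be positive.")
    chunks, current_chunk, current_tokens = [], [], 0
    for line in text.split("\n"):
        line_tokens = estimate_tokens(line)
        if current_tokens + line_tokens > effective_max_tokens and current_chunk:
            chunks.append("\n".join(current_chunk))  # flush a full chunk
            current_chunk, current_tokens = [], 0
        current_chunk.append(line)
        current_tokens += line_tokens
    if current_chunk:
        chunks.append("\n".join(current_chunk))
    return chunks
```

Reserving `PROMPT_OVERHEAD` up front keeps each chunk's final prompt under the model window even after the instruction template is wrapped around it.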
@@ -131,9 +130,7 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
|
|
131 |
return messages, report_path
|
132 |
|
133 |
try:
|
134 |
-
messages.append({"role": "user", "content": f"
|
135 |
-
messages.append({"role": "assistant", "content": "π Analyzing clinical data... This may take a moment."})
|
136 |
-
|
137 |
extracted_text = extract_text_from_excel(file.name)
|
138 |
chunks = split_text_into_chunks(extracted_text)
|
139 |
chunk_responses = [None] * len(chunks)
|
@@ -142,7 +139,7 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
             prompt = build_prompt_from_text(chunk)
             prompt_tokens = estimate_tokens(prompt)
             if prompt_tokens > MAX_MODEL_TOKENS:
-                return index, f"❌ Chunk {index+1} prompt too long
+                return index, f"❌ Chunk {index+1} prompt too long. Skipping..."
             response = ""
             try:
                 for result in agent.run_gradio_chat(
@@ -154,79 +151,47 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
                     call_agent=False,
                     conversation=[],
                 ):
-                    if isinstance(result, str):
-                        response += result
-                    elif hasattr(result, "content"):
-                        response += result.content
-                    elif isinstance(result, list):
-                        for r in result:
-                            if hasattr(r, "content"):
-                                response += r.content
+                    response += getattr(result, "content", result) if isinstance(result, (str, list)) else ""
             except Exception as e:
                 return index, f"❌ Error analyzing chunk {index+1}: {str(e)}"
             return index, clean_response(response)
 
-        # Process chunks silently without displaying progress
         with ThreadPoolExecutor(max_workers=1) as executor:
             futures = [executor.submit(analyze_chunk, i, chunk) for i, chunk in enumerate(chunks)]
             for future in as_completed(futures):
                 i, result = future.result()
                 chunk_responses[i] = result
+                if result.startswith("❌"):
+                    messages.append({"role": "assistant", "content": result})
 
         valid_responses = [res for res in chunk_responses if not res.startswith("❌")]
         if not valid_responses:
-            messages.append({"role": "assistant", "content": "❌ No valid
+            messages.append({"role": "assistant", "content": "❌ No valid chunk responses to summarize."})
             return messages, report_path
 
         summary = "\n\n".join(valid_responses)
-        final_prompt = f"
-
-{summary}
-
-Structure your response with clear sections:
-1. Key Diagnostic Patterns
-2. Medication Concerns
-3. Potential Missed Opportunities
-4. Notable Inconsistencies
-5. Recommended Follow-ups
-
-Use bullet points for clarity and professional medical terminology."""
-
-        final_report_text = ""
-        try:
-            for result in agent.run_gradio_chat(
-                message=final_prompt,
-                history=[],
-                temperature=0.2,
-                max_new_tokens=MAX_NEW_TOKENS,
-                max_token=MAX_MODEL_TOKENS,
-                call_agent=False,
-                conversation=[],
-            ):
-                if isinstance(result, str):
-                    final_report_text += result
-                elif hasattr(result, "content"):
-                    final_report_text += result.content
-                elif isinstance(result, list):
-                    for r in result:
-                        if hasattr(r, "content"):
-                            final_report_text += r.content
-        except Exception as e:
-            messages.append({"role": "assistant", "content": f"❌ Error generating final report: {str(e)}"})
-            return messages, report_path
-
-        final_report = f"# 🧠 Clinical Analysis Report\n\n{clean_response(final_report_text)}"
-
-        # Update the last message with the final report
-        messages[-1]["content"] = f"## 📊 Clinical Analysis Report\n\n{clean_response(final_report_text)}"
-
-        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
-        report_path = os.path.join(report_dir, f"clinical_report_{timestamp}.md")
+        final_prompt = f"Summarize the key findings from the following analyses:\n\n{summary}"
+        messages.append({"role": "assistant", "content": "🔄 Generating final report..."})
 
+        final_report_text = ""
+        for result in agent.run_gradio_chat(
+            message=final_prompt,
+            history=[],
+            temperature=0.2,
+            max_new_tokens=MAX_NEW_TOKENS,
+            max_token=MAX_MODEL_TOKENS,
+            call_agent=False,
+            conversation=[],
+        ):
+            final_report_text += getattr(result, "content", result) if isinstance(result, (str, list)) else ""
+
+        cleaned = clean_response(final_report_text)
+        report_path = os.path.join(report_dir, f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
         with open(report_path, 'w') as f:
-            f.write(
+            f.write(f"# 🧠 Final Patient Report\n\n{cleaned}")
 
-        messages.append({"role": "assistant", "content": f"
+        messages.append({"role": "assistant", "content": f"📊 Final Report:\n\n{cleaned}"})
+        messages.append({"role": "assistant", "content": f"✅ Report generated and saved: {os.path.basename(report_path)}"})
 
     except Exception as e:
         messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
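Two review notes on this hunk. First, `max_workers=1` makes the executor effectively sequential; the `index` returned by `analyze_chunk` is what keeps `chunk_responses` ordered either way. Second, the new one-liner is not equivalent to the branch chain it replaced: when `run_gradio_chat` yields a list, `getattr(result, "content", result)` returns the list itself, and `response += <list>` raises `TypeError`, so the nested-message case the old code handled is silently lost. A small helper that preserves it (my sketch, not code from the commit):

```python
def collect_text(result) -> str:
    """Flatten a streamed result (str, message object, or nested list) to text."""
    if isinstance(result, str):
        return result
    if hasattr(result, "content"):
        return result.content
    if isinstance(result, list):
        return "".join(collect_text(r) for r in result)
    return ""

# Usage inside either streaming loop:
#     for result in agent.run_gradio_chat(...):
#         response += collect_text(result)
```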
@@ -234,155 +199,45 @@ Use bullet points for clarity and professional medical terminology."""
|
|
234 |
return messages, report_path
|
235 |
|
236 |
def create_ui(agent):
|
237 |
-
with gr.Blocks(
|
238 |
-
title="Clinical Analysis Tool",
|
239 |
-
css="""
|
240 |
.gradio-container {
|
241 |
-
max-width: 900px
|
242 |
margin: auto;
|
243 |
-
font-family: '
|
244 |
-
background-color: #
|
245 |
}
|
246 |
.gr-button.primary {
|
247 |
-
background: linear-gradient(to right, #
|
248 |
color: white;
|
249 |
border: none;
|
250 |
border-radius: 8px;
|
251 |
-
padding: 12px 24px;
|
252 |
-
font-weight: 500;
|
253 |
-
transition: all 0.2s;
|
254 |
-
}
|
255 |
-
.gr-button.primary:hover {
|
256 |
-
background: linear-gradient(to right, #4338ca, #6d28d9);
|
257 |
-
transform: translateY(-1px);
|
258 |
-
box-shadow: 0 4px 6px rgba(0,0,0,0.1);
|
259 |
-
}
|
260 |
-
.gr-file-upload, .gr-chatbot, .gr-markdown {
|
261 |
-
background-color: white;
|
262 |
-
border-radius: 12px;
|
263 |
-
box-shadow: 0 1px 3px rgba(0,0,0,0.05);
|
264 |
-
padding: 1.5rem;
|
265 |
-
border: 1px solid #e5e7eb;
|
266 |
-
}
|
267 |
-
.gr-chatbot {
|
268 |
-
min-height: 600px;
|
269 |
-
border-left: none;
|
270 |
}
|
271 |
-
.
|
272 |
-
background-color: #f3f4f6;
|
273 |
-
border-radius: 12px;
|
274 |
-
padding: 12px 16px;
|
275 |
-
margin: 8px 0;
|
276 |
-
}
|
277 |
-
.chat-message-assistant {
|
278 |
background-color: white;
|
279 |
-
border-radius:
|
280 |
-
|
281 |
-
margin: 8px 0;
|
282 |
-
border: 1px solid #e5e7eb;
|
283 |
-
}
|
284 |
-
.chat-message-content ul, .chat-message-content ol {
|
285 |
-
padding-left: 1.5em;
|
286 |
-
margin: 0.5em 0;
|
287 |
-
}
|
288 |
-
.chat-message-content li {
|
289 |
-
margin: 0.3em 0;
|
290 |
-
}
|
291 |
-
h1, h2, h3 {
|
292 |
-
color: #111827;
|
293 |
-
}
|
294 |
-
.gr-markdown h1 {
|
295 |
-
font-size: 1.8rem;
|
296 |
-
margin-bottom: 1rem;
|
297 |
-
font-weight: 600;
|
298 |
}
|
299 |
-
|
300 |
-
color: #4b5563;
|
301 |
-
line-height: 1.6;
|
302 |
-
}
|
303 |
-
.progress-bar {
|
304 |
-
height: 4px;
|
305 |
-
background: #e5e7eb;
|
306 |
-
border-radius: 2px;
|
307 |
-
margin: 12px 0;
|
308 |
-
overflow: hidden;
|
309 |
-
}
|
310 |
-
.progress-bar-fill {
|
311 |
-
height: 100%;
|
312 |
-
background: linear-gradient(to right, #4f46e5, #7c3aed);
|
313 |
-
transition: width 0.3s ease;
|
314 |
-
}
|
315 |
-
"""
|
316 |
-
) as demo:
|
317 |
gr.Markdown("""
|
318 |
-
<
|
319 |
-
|
320 |
-
<p style='color: #6b7280; margin-top: 0;'>Upload patient records in Excel format for comprehensive clinical analysis</p>
|
321 |
-
</div>
|
322 |
""")
|
323 |
|
324 |
with gr.Row():
|
325 |
with gr.Column(scale=3):
|
326 |
-
chatbot = gr.Chatbot(
|
327 |
-
label="Analysis Results",
|
328 |
-
show_copy_button=True,
|
329 |
-
height=600,
|
330 |
-
bubble_full_width=False,
|
331 |
-
avatar_images=(None, "https://i.imgur.com/6wX7Zb4.png"),
|
332 |
-
render_markdown=True
|
333 |
-
)
|
334 |
with gr.Column(scale=1):
|
335 |
-
file_upload = gr.File(
|
336 |
-
|
337 |
-
|
338 |
-
height=100,
|
339 |
-
interactive=True
|
340 |
-
)
|
341 |
-
analyze_btn = gr.Button(
|
342 |
-
"Analyze Clinical Data",
|
343 |
-
variant="primary",
|
344 |
-
elem_classes="primary"
|
345 |
-
)
|
346 |
-
report_output = gr.File(
|
347 |
-
label="Download Report",
|
348 |
-
visible=False,
|
349 |
-
interactive=False
|
350 |
-
)
|
351 |
-
gr.Markdown("""
|
352 |
-
<div style='margin-top: 1rem; padding: 1rem; background-color: #f8fafc; border-radius: 8px;'>
|
353 |
-
<h3 style='margin-top: 0; margin-bottom: 0.5rem; font-size: 1rem;'>About this tool</h3>
|
354 |
-
<p style='margin: 0; font-size: 0.9rem; color: #64748b;'>
|
355 |
-
This tool analyzes clinical documentation to identify patterns, inconsistencies, and opportunities for improved patient care.
|
356 |
-
</p>
|
357 |
-
</div>
|
358 |
-
""")
|
359 |
|
360 |
chatbot_state = gr.State(value=[])
|
361 |
|
362 |
def update_ui(file, current_state):
|
363 |
messages, report_path = process_final_report(agent, file, current_state)
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
content = msg.get("content", "")
|
368 |
-
if role == "assistant":
|
369 |
-
# Format lists and sections for better readability
|
370 |
-
content = content.replace("- ", "β’ ")
|
371 |
-
content = re.sub(r"(\d+\.\s)", r"\n\1", content)
|
372 |
-
content = f"<div class='chat-message-assistant'>{content}</div>"
|
373 |
-
else:
|
374 |
-
content = f"<div class='chat-message-user'>{content}</div>"
|
375 |
-
formatted_messages.append({"role": role, "content": content})
|
376 |
-
|
377 |
-
report_update = gr.update(visible=report_path is not None, value=report_path)
|
378 |
-
return formatted_messages, report_update, formatted_messages
|
379 |
-
|
380 |
-
analyze_btn.click(
|
381 |
-
fn=update_ui,
|
382 |
-
inputs=[file_upload, chatbot_state],
|
383 |
-
outputs=[chatbot, report_output, chatbot_state],
|
384 |
-
api_name="analyze"
|
385 |
-
)
|
386 |
|
387 |
return demo
|
388 |
|
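The switch to `type="messages"` is why `update_ui` can now return the `messages` list unchanged: a messages-mode `gr.Chatbot` consumes role/content dicts directly, making the old HTML-wrapping loop dead weight. A standalone illustration of that format (the handler and labels here are invented for the demo, not taken from app.py):

```python
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")  # expects [{"role": ..., "content": ...}]
    state = gr.State(value=[])

    def run_analysis(history):
        # Hypothetical handler: append one user/assistant exchange.
        history = history + [
            {"role": "user", "content": "Processing Excel file: demo.xlsx"},
            {"role": "assistant", "content": "✅ Report generated and saved."},
        ]
        return history, history

    gr.Button("Analyze").click(run_analysis, inputs=[state], outputs=[chatbot, state])

demo.launch()
```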
@@ -390,13 +245,7 @@ if __name__ == "__main__":
|
|
390 |
try:
|
391 |
agent = init_agent()
|
392 |
demo = create_ui(agent)
|
393 |
-
demo.launch(
|
394 |
-
server_name="0.0.0.0",
|
395 |
-
server_port=7860,
|
396 |
-
show_error=True,
|
397 |
-
allowed_paths=["/data/hf_cache/reports"],
|
398 |
-
share=False
|
399 |
-
)
|
400 |
except Exception as e:
|
401 |
print(f"Error: {str(e)}")
|
402 |
-
sys.exit(1)
|
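One behavioral note on the collapsed launch call: dropping `show_error=True` means server-side tracebacks are no longer surfaced in the UI, while `allowed_paths` remains what lets the "Download Report" `gr.File` serve files written under `/data/hf_cache/reports`. A minimal sketch of that interaction (directory and handler are stand-ins, not from the commit):

```python
import os
import gradio as gr

REPORT_DIR = "/tmp/reports"  # hypothetical stand-in for /data/hf_cache/reports
os.makedirs(REPORT_DIR, exist_ok=True)

def make_report() -> str:
    # The file lives outside Gradio's working dir; it is downloadable only
    # because its directory is whitelisted via allowed_paths below.
    path = os.path.join(REPORT_DIR, "example_report.md")
    with open(path, "w") as f:
        f.write("# Example report\n")
    return path

with gr.Blocks() as demo:
    out = gr.File(label="Download Report")
    gr.Button("Generate").click(make_report, outputs=[out])

demo.launch(server_name="0.0.0.0", server_port=7860,
            allowed_paths=[REPORT_DIR], share=False)
```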