Ali2206 committed on
Commit
d14630a
·
verified ·
1 Parent(s): faaf806

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -57
app.py CHANGED
@@ -9,6 +9,7 @@ import shutil
9
  import re
10
  from datetime import datetime
11
  import time
 
12
 
13
  # Configuration and setup
14
  persistent_dir = "/data/hf_cache"
@@ -31,7 +32,6 @@ sys.path.insert(0, src_path)
31
 
32
  from txagent.txagent import TxAgent
33
 
34
- # Constants
35
  MAX_MODEL_TOKENS = 32768
36
  MAX_CHUNK_TOKENS = 8192
37
  MAX_NEW_TOKENS = 2048
@@ -131,8 +131,6 @@ def init_agent():
131
  return agent
132
 
133
  def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
134
- from concurrent.futures import ThreadPoolExecutor, as_completed
135
-
136
  messages = chatbot_state if chatbot_state else []
137
  report_path = None
138
 
@@ -145,7 +143,7 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
145
  messages.append({"role": "assistant", "content": "⏳ Extracting and analyzing data..."})
146
 
147
  extracted_text = extract_text_from_excel(file.name)
148
- chunks = split_text_into_chunks(extracted_text, max_tokens=MAX_CHUNK_TOKENS)
149
  chunk_responses = [None] * len(chunks)
150
 
151
  def analyze_chunk(index: int, chunk: str) -> Tuple[int, str]:
@@ -153,7 +151,6 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
153
  prompt_tokens = estimate_tokens(prompt)
154
  if prompt_tokens > MAX_MODEL_TOKENS:
155
  return index, f"❌ Chunk {index+1} prompt too long ({prompt_tokens} tokens). Skipping..."
156
-
157
  response = ""
158
  try:
159
  for result in agent.run_gradio_chat(
@@ -175,10 +172,9 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
175
  response += r.content
176
  except Exception as e:
177
  return index, f"❌ Error analyzing chunk {index+1}: {str(e)}"
178
-
179
  return index, clean_response(response)
180
 
181
- with ThreadPoolExecutor(max_workers=4) as executor:
182
  futures = [executor.submit(analyze_chunk, i, chunk) for i, chunk in enumerate(chunks)]
183
  for future in as_completed(futures):
184
  i, result = future.result()
@@ -268,11 +264,11 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
268
  messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
269
 
270
  return messages, report_path
 
271
  def create_ui(agent):
272
- """Create the Gradio UI for the patient history analysis tool."""
273
  with gr.Blocks(
274
  title="Patient History Chat",
275
- css="""
276
  .gradio-container {
277
  max-width: 900px !important;
278
  margin: auto;
@@ -307,42 +303,24 @@ def create_ui(agent):
307
  padding-left: 1.2em;
308
  margin: 0.4em 0;
309
  }
310
- """
311
  ) as demo:
312
- gr.Markdown("""
313
- <h2 style='color:#182848'>🏥 Patient History Analysis Tool</h2>
314
- <p style='color:#444;'>Upload an Excel file containing clinical data. The assistant will analyze it for patterns, inconsistencies, and recommendations.</p>
315
- """)
316
 
317
  with gr.Row():
318
  with gr.Column(scale=3):
319
  chatbot = gr.Chatbot(
320
- label="Clinical Assistant",
321
  show_copy_button=True,
322
  height=600,
323
- type="messages",
324
- avatar_images=(
325
- None,
326
- "https://i.imgur.com/6wX7Zb4.png"
327
- ),
328
  render_markdown=True
329
  )
330
  with gr.Column(scale=1):
331
- file_upload = gr.File(
332
- label="Upload Excel File",
333
- file_types=[".xlsx"],
334
- height=100
335
- )
336
- analyze_btn = gr.Button(
337
- "🧠 Analyze Patient History",
338
- variant="primary",
339
- elem_classes="primary"
340
- )
341
- report_output = gr.File(
342
- label="Download Report",
343
- visible=False,
344
- interactive=False
345
- )
346
 
347
  chatbot_state = gr.State(value=[])
348
 
@@ -350,36 +328,24 @@ def create_ui(agent):
350
  messages, report_path = process_final_report(agent, file, current_state)
351
  formatted_messages = []
352
  for msg in messages:
353
- role = msg.get("role")
354
- content = msg.get("content", "")
355
- if role == "assistant":
356
- content = content.replace("- ", "\n- ") # Ensure bullet formatting
357
- content = f"<div class='chat-message-content'>{content}</div>"
358
- formatted_messages.append({"role": role, "content": content})
359
  report_update = gr.update(visible=report_path is not None, value=report_path)
360
  return formatted_messages, report_update, formatted_messages
361
 
362
- analyze_btn.click(
363
- fn=update_ui,
364
- inputs=[file_upload, chatbot_state],
365
- outputs=[chatbot, report_output, chatbot_state],
366
- api_name="analyze"
367
- )
368
 
369
  return demo
370
 
371
-
372
- if __name__ == "__main__":
373
  try:
374
  agent = init_agent()
375
  demo = create_ui(agent)
376
- demo.launch(
377
- server_name="0.0.0.0",
378
- server_port=7860,
379
- show_error=True,
380
- allowed_paths=["/data/hf_cache/reports"],
381
- share=False
382
- )
383
  except Exception as e:
384
- print(f"Error: {str(e)}")
385
  sys.exit(1)
 
9
  import re
10
  from datetime import datetime
11
  import time
12
+ from concurrent.futures import ThreadPoolExecutor, as_completed
13
 
14
  # Configuration and setup
15
  persistent_dir = "/data/hf_cache"
 
32
 
33
  from txagent.txagent import TxAgent
34
 
 
35
  MAX_MODEL_TOKENS = 32768
36
  MAX_CHUNK_TOKENS = 8192
37
  MAX_NEW_TOKENS = 2048
 
131
  return agent
132
 
133
  def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
 
 
134
  messages = chatbot_state if chatbot_state else []
135
  report_path = None
136
 
 
143
  messages.append({"role": "assistant", "content": "⏳ Extracting and analyzing data..."})
144
 
145
  extracted_text = extract_text_from_excel(file.name)
146
+ chunks = split_text_into_chunks(extracted_text)
147
  chunk_responses = [None] * len(chunks)
148
 
149
  def analyze_chunk(index: int, chunk: str) -> Tuple[int, str]:
 
151
  prompt_tokens = estimate_tokens(prompt)
152
  if prompt_tokens > MAX_MODEL_TOKENS:
153
  return index, f"❌ Chunk {index+1} prompt too long ({prompt_tokens} tokens). Skipping..."
 
154
  response = ""
155
  try:
156
  for result in agent.run_gradio_chat(
 
172
  response += r.content
173
  except Exception as e:
174
  return index, f"❌ Error analyzing chunk {index+1}: {str(e)}"
 
175
  return index, clean_response(response)
176
 
177
+ with ThreadPoolExecutor(max_workers=1) as executor:
178
  futures = [executor.submit(analyze_chunk, i, chunk) for i, chunk in enumerate(chunks)]
179
  for future in as_completed(futures):
180
  i, result = future.result()
 
264
  messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
265
 
266
  return messages, report_path
267
+
268
  def create_ui(agent):
 
269
  with gr.Blocks(
270
  title="Patient History Chat",
271
+ css="""
272
  .gradio-container {
273
  max-width: 900px !important;
274
  margin: auto;
 
303
  padding-left: 1.2em;
304
  margin: 0.4em 0;
305
  }
306
+ """
307
  ) as demo:
308
+ gr.Markdown("""
<h2 style='color:#182848'>🏥 Patient History Analysis Tool</h2>
<p style='color:#444;'>Upload an Excel file containing clinical data. The assistant will analyze it for patterns, inconsistencies, and recommendations.</p>
""")
 
 
 
309
 
310
  with gr.Row():
311
  with gr.Column(scale=3):
312
  chatbot = gr.Chatbot(
313
+ label="Clinical Assistant",
314
  show_copy_button=True,
315
  height=600,
316
+ type="messages",
317
+ avatar_images=(None, "https://i.imgur.com/6wX7Zb4.png"),
 
 
 
318
  render_markdown=True
319
  )
320
  with gr.Column(scale=1):
321
+ file_upload = gr.File(label="Upload Excel File", file_types=[".xlsx"], height=100)
322
+ analyze_btn = gr.Button("🧠 Analyze Patient History", variant="primary", elem_classes="primary")
323
+ report_output = gr.File(label="Download Report", visible=False, interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
324
 
325
  chatbot_state = gr.State(value=[])
326
 
 
328
  messages, report_path = process_final_report(agent, file, current_state)
329
  formatted_messages = []
330
  for msg in messages:
331
+ role = msg.get("role")
332
+ content = msg.get("content", "")
333
+ if role == "assistant":
334
+ content = content.replace("- ", "\n- ")
335
+ content = f"<div class='chat-message-content'>{content}</div>"
336
+ formatted_messages.append({"role": role, "content": content})
337
  report_update = gr.update(visible=report_path is not None, value=report_path)
338
  return formatted_messages, report_update, formatted_messages
339
 
340
+ analyze_btn.click(fn=update_ui, inputs=[file_upload, chatbot_state], outputs=[chatbot, report_output, chatbot_state], api_name="analyze")
 
 
 
 
 
341
 
342
  return demo
343
 
344
+ if __name__ == "__main__":
 
345
  try:
346
  agent = init_agent()
347
  demo = create_ui(agent)
348
+ demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True, allowed_paths=["/data/hf_cache/reports"], share=False)
 
 
 
 
 
 
349
  except Exception as e:
350
+ print(f"Error: {str(e)}")
351
  sys.exit(1)