Ali2206 committed on
Commit
b321961
·
verified ·
1 Parent(s): d14630a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -32
app.py CHANGED
@@ -32,6 +32,7 @@ sys.path.insert(0, src_path)
32
 
33
  from txagent.txagent import TxAgent
34
 
 
35
  MAX_MODEL_TOKENS = 32768
36
  MAX_CHUNK_TOKENS = 8192
37
  MAX_NEW_TOKENS = 2048
@@ -68,26 +69,19 @@ def split_text_into_chunks(text: str, max_tokens: int = MAX_CHUNK_TOKENS) -> Lis
68
  effective_max_tokens = max_tokens - PROMPT_OVERHEAD
69
  if effective_max_tokens <= 0:
70
  raise ValueError(f"Effective max tokens ({effective_max_tokens}) must be positive.")
71
-
72
  lines = text.split("\n")
73
- chunks = []
74
- current_chunk = []
75
- current_tokens = 0
76
-
77
  for line in lines:
78
  line_tokens = estimate_tokens(line)
79
  if current_tokens + line_tokens > effective_max_tokens:
80
  if current_chunk:
81
  chunks.append("\n".join(current_chunk))
82
- current_chunk = [line]
83
- current_tokens = line_tokens
84
  else:
85
  current_chunk.append(line)
86
  current_tokens += line_tokens
87
-
88
  if current_chunk:
89
  chunks.append("\n".join(current_chunk))
90
-
91
  return chunks
92
 
93
  def build_prompt_from_text(chunk: str) -> str:
@@ -113,10 +107,8 @@ Please analyze the above and provide:
113
  def init_agent():
114
  default_tool_path = os.path.abspath("data/new_tool.json")
115
  target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
116
-
117
  if not os.path.exists(target_tool_path):
118
  shutil.copy(default_tool_path, target_tool_path)
119
-
120
  agent = TxAgent(
121
  model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
122
  rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
@@ -141,7 +133,6 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
141
  try:
142
  messages.append({"role": "user", "content": f"Processing Excel file: {os.path.basename(file.name)}"})
143
  messages.append({"role": "assistant", "content": "⏳ Extracting and analyzing data..."})
144
-
145
  extracted_text = extract_text_from_excel(file.name)
146
  chunks = split_text_into_chunks(extracted_text)
147
  chunk_responses = [None] * len(chunks)
@@ -219,7 +210,6 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
219
  except Exception as e:
220
  messages.append({"role": "assistant", "content": f"❌ Error summarizing intermediate results: {str(e)}"})
221
  return messages, report_path
222
-
223
  summary += f"\n\n### Chunk {i+1} Analysis\n{response}"
224
  current_summary_tokens += response_tokens
225
 
@@ -268,7 +258,7 @@ def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tu
268
  def create_ui(agent):
269
  with gr.Blocks(
270
  title="Patient History Chat",
271
- css=\"\"\"
272
  .gradio-container {
273
  max-width: 900px !important;
274
  margin: auto;
@@ -303,24 +293,27 @@ def create_ui(agent):
303
  padding-left: 1.2em;
304
  margin: 0.4em 0;
305
  }
306
- \"\"\"
307
  ) as demo:
308
- gr.Markdown(\"\"\"\n<h2 style='color:#182848'>🏥 Patient History Analysis Tool</h2>\n<p style='color:#444;'>Upload an Excel file containing clinical data. The assistant will analyze it for patterns, inconsistencies, and recommendations.</p>\n\"\"\")
 
 
 
309
 
310
  with gr.Row():
311
  with gr.Column(scale=3):
312
  chatbot = gr.Chatbot(
313
- label=\"Clinical Assistant\",
314
  show_copy_button=True,
315
  height=600,
316
- type=\"messages\",
317
- avatar_images=(None, \"https://i.imgur.com/6wX7Zb4.png\"),
318
  render_markdown=True
319
  )
320
  with gr.Column(scale=1):
321
- file_upload = gr.File(label=\"Upload Excel File\", file_types=[\".xlsx\"], height=100)
322
- analyze_btn = gr.Button(\"🧠 Analyze Patient History\", variant=\"primary\", elem_classes=\"primary\")
323
- report_output = gr.File(label=\"Download Report\", visible=False, interactive=False)
324
 
325
  chatbot_state = gr.State(value=[])
326
 
@@ -328,24 +321,24 @@ def create_ui(agent):
328
  messages, report_path = process_final_report(agent, file, current_state)
329
  formatted_messages = []
330
  for msg in messages:
331
- role = msg.get(\"role\")
332
- content = msg.get(\"content\", \"\")
333
- if role == \"assistant\":
334
- content = content.replace(\"- \", \"\\n- \")
335
- content = f\"<div class='chat-message-content'>{content}</div>\"
336
- formatted_messages.append({\"role\": role, \"content\": content})
337
  report_update = gr.update(visible=report_path is not None, value=report_path)
338
  return formatted_messages, report_update, formatted_messages
339
 
340
- analyze_btn.click(fn=update_ui, inputs=[file_upload, chatbot_state], outputs=[chatbot, report_output, chatbot_state], api_name=\"analyze\")
341
 
342
  return demo
343
 
344
- if __name__ == \"__main__\":
345
  try:
346
  agent = init_agent()
347
  demo = create_ui(agent)
348
- demo.launch(server_name=\"0.0.0.0\", server_port=7860, show_error=True, allowed_paths=[\"/data/hf_cache/reports\"], share=False)
349
  except Exception as e:
350
- print(f\"Error: {str(e)}\")
351
  sys.exit(1)
 
32
 
33
  from txagent.txagent import TxAgent
34
 
35
+ # Constants
36
  MAX_MODEL_TOKENS = 32768
37
  MAX_CHUNK_TOKENS = 8192
38
  MAX_NEW_TOKENS = 2048
 
69
  effective_max_tokens = max_tokens - PROMPT_OVERHEAD
70
  if effective_max_tokens <= 0:
71
  raise ValueError(f"Effective max tokens ({effective_max_tokens}) must be positive.")
 
72
  lines = text.split("\n")
73
+ chunks, current_chunk, current_tokens = [], [], 0
 
 
 
74
  for line in lines:
75
  line_tokens = estimate_tokens(line)
76
  if current_tokens + line_tokens > effective_max_tokens:
77
  if current_chunk:
78
  chunks.append("\n".join(current_chunk))
79
+ current_chunk, current_tokens = [line], line_tokens
 
80
  else:
81
  current_chunk.append(line)
82
  current_tokens += line_tokens
 
83
  if current_chunk:
84
  chunks.append("\n".join(current_chunk))
 
85
  return chunks
86
 
87
  def build_prompt_from_text(chunk: str) -> str:
 
107
  def init_agent():
108
  default_tool_path = os.path.abspath("data/new_tool.json")
109
  target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
 
110
  if not os.path.exists(target_tool_path):
111
  shutil.copy(default_tool_path, target_tool_path)
 
112
  agent = TxAgent(
113
  model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
114
  rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
 
133
  try:
134
  messages.append({"role": "user", "content": f"Processing Excel file: {os.path.basename(file.name)}"})
135
  messages.append({"role": "assistant", "content": "⏳ Extracting and analyzing data..."})
 
136
  extracted_text = extract_text_from_excel(file.name)
137
  chunks = split_text_into_chunks(extracted_text)
138
  chunk_responses = [None] * len(chunks)
 
210
  except Exception as e:
211
  messages.append({"role": "assistant", "content": f"❌ Error summarizing intermediate results: {str(e)}"})
212
  return messages, report_path
 
213
  summary += f"\n\n### Chunk {i+1} Analysis\n{response}"
214
  current_summary_tokens += response_tokens
215
 
 
258
  def create_ui(agent):
259
  with gr.Blocks(
260
  title="Patient History Chat",
261
+ css="""
262
  .gradio-container {
263
  max-width: 900px !important;
264
  margin: auto;
 
293
  padding-left: 1.2em;
294
  margin: 0.4em 0;
295
  }
296
+ """
297
  ) as demo:
298
+ gr.Markdown("""
299
+ <h2 style='color:#182848'>🏥 Patient History Analysis Tool</h2>
300
+ <p style='color:#444;'>Upload an Excel file containing clinical data. The assistant will analyze it for patterns, inconsistencies, and recommendations.</p>
301
+ """)
302
 
303
  with gr.Row():
304
  with gr.Column(scale=3):
305
  chatbot = gr.Chatbot(
306
+ label="Clinical Assistant",
307
  show_copy_button=True,
308
  height=600,
309
+ type="messages",
310
+ avatar_images=(None, "https://i.imgur.com/6wX7Zb4.png"),
311
  render_markdown=True
312
  )
313
  with gr.Column(scale=1):
314
+ file_upload = gr.File(label="Upload Excel File", file_types=[".xlsx"], height=100)
315
+ analyze_btn = gr.Button("🧠 Analyze Patient History", variant="primary", elem_classes="primary")
316
+ report_output = gr.File(label="Download Report", visible=False, interactive=False)
317
 
318
  chatbot_state = gr.State(value=[])
319
 
 
321
  messages, report_path = process_final_report(agent, file, current_state)
322
  formatted_messages = []
323
  for msg in messages:
324
+ role = msg.get("role")
325
+ content = msg.get("content", "")
326
+ if role == "assistant":
327
+ content = content.replace("- ", "\n- ")
328
+ content = f"<div class='chat-message-content'>{content}</div>"
329
+ formatted_messages.append({"role": role, "content": content})
330
  report_update = gr.update(visible=report_path is not None, value=report_path)
331
  return formatted_messages, report_update, formatted_messages
332
 
333
+ analyze_btn.click(fn=update_ui, inputs=[file_upload, chatbot_state], outputs=[chatbot, report_output, chatbot_state], api_name="analyze")
334
 
335
  return demo
336
 
337
+ if __name__ == "__main__":
338
  try:
339
  agent = init_agent()
340
  demo = create_ui(agent)
341
+ demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True, allowed_paths=["/data/hf_cache/reports"], share=False)
342
  except Exception as e:
343
+ print(f"Error: {str(e)}")
344
  sys.exit(1)