Ali2206 committed on
Commit 70f70c1 · verified · 1 Parent(s): 3182c0a

Update app.py

Files changed (1):
  1. app.py +8 -5
app.py CHANGED
@@ -174,10 +174,14 @@ def process_chunk_sync(agent, chunk: str, chunk_idx: int) -> Tuple[int, str]:
     return chunk_idx, ""
 
 async def process_file(agent: TxAgent, file_path: str) -> Generator[Tuple[List[Dict[str, str]], Union[str, None]], None, None]:
-    """Process the file with improved error handling and vLLM stability"""
     messages = []
     report_path = None
 
+    if file_path is None:
+        messages.append({"role": "assistant", "content": "❌ Please upload a valid Excel file before analyzing."})
+        yield messages, None
+        return
+
     try:
         # Initial messages
         messages.append({"role": "user", "content": f"Processing file: {os.path.basename(file_path)}"})
@@ -191,13 +195,13 @@ async def process_file(agent: TxAgent, file_path: str) -> Generator[Tuple[List[D
         messages.append({"role": "assistant", "content": f"✅ Extracted {len(chunks)} chunks in {time.time()-start_time:.1f}s"})
         yield messages, None
 
-        # Process chunks sequentially to avoid vLLM socket issues
+        # Process chunks sequentially
         chunk_responses = []
         for idx, chunk in enumerate(chunks):
             messages.append({"role": "assistant", "content": f"🔍 Processing chunk {idx+1}/{len(chunks)}..."})
             yield messages, None
 
-            _, response = process_chunk_sync(agent, chunk, idx)
+            _, response = await process_chunk(agent, chunk, idx)
             chunk_responses.append(response)
 
             messages.append({"role": "assistant", "content": f"✅ Chunk {idx+1} processed"})
@@ -245,7 +249,6 @@ async def process_file(agent: TxAgent, file_path: str) -> Generator[Tuple[List[D
         logger.error(f"Processing failed: {str(e)}")
         messages.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})
         yield messages, None
-
 def create_ui(agent: TxAgent):
     """Create the Gradio interface with simplified interaction"""
     with gr.Blocks(title="Clinical Analysis", css=".gradio-container {max-width: 900px}") as demo:
@@ -275,7 +278,7 @@ def create_ui(agent: TxAgent):
         )
 
         analyze_btn.click(
-            fn=lambda file: process_file(agent, file.name) if file else ([{"role": "assistant", "content": "❌ Please upload a file"}], None),
+            fn=lambda file: process_file(agent, file.name if file else None),
             inputs=[file_input],
             outputs=[chatbot, report_output],
             concurrency_limit=1  # Ensure sequential processing
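The old lambda had two return shapes: the `process_file` async generator when a file was present, or a plain `([message], None)` tuple when it was not. Folding the validation into `process_file` gives the handler a single return type. An equivalent lambda-free wiring, sketched here with a hypothetical `on_analyze` handler around this commit's `process_file`, uses a named async generator function, which Gradio recognizes directly as a streaming handler:

```python
import gradio as gr

def create_ui(agent):
    with gr.Blocks(title="Clinical Analysis") as demo:
        file_input = gr.File(label="Upload file")
        chatbot = gr.Chatbot(type="messages")
        report_output = gr.File(label="Report")
        analyze_btn = gr.Button("Analyze")

        async def on_analyze(file):
            # Hypothetical named handler: closes over `agent`, normalizes the
            # optional upload, and re-yields process_file's streamed updates.
            async for messages, report in process_file(agent, file.name if file else None):
                yield messages, report

        analyze_btn.click(
            fn=on_analyze,
            inputs=[file_input],
            outputs=[chatbot, report_output],
            concurrency_limit=1,  # one analysis at a time
        )
    return demo
```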
 