Ali2206 committed on
Commit 46fbf29 · verified · 1 Parent(s): a53de3c

Update src/txagent/txagent.py

Files changed (1)
  1. src/txagent/txagent.py +112 -505
src/txagent/txagent.py CHANGED
@@ -14,7 +14,7 @@ from .toolrag import ToolRAGModel
14
  import torch
15
  import logging
16
 
17
- # Configure logging with a more specific logger name
18
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
19
  logger = logging.getLogger("TxAgent")
20
 
@@ -74,19 +74,20 @@ class TxAgent:
74
  return f"The model {model_name} is already loaded."
75
  self.model_name = model_name
76
 
 
77
  self.model = LLM(
78
  model=self.model_name,
79
  dtype="float16",
80
  max_model_len=131072,
81
- max_num_batched_tokens=65536, # Increased for A100 80GB
82
- gpu_memory_utilization=0.95, # Higher utilization for better performance
83
  trust_remote_code=True
84
  )
85
  self.chat_template = Template(self.model.get_tokenizer().chat_template)
86
  self.tokenizer = self.model.get_tokenizer()
87
  logger.info(
88
  "Model %s loaded with max_model_len=%d, max_num_batched_tokens=%d, gpu_memory_utilization=%.2f",
89
- self.model_name, 131072, 32768, 0.9
90
  )
91
  return f"Model {model_name} loaded successfully."
92
 
@@ -106,7 +107,6 @@ class TxAgent:
106
  self.rag_model.load_tool_desc_embedding(self.tooluniverse)
107
  self.rag_model.save_embeddings(cache_path)
108
  logger.debug("Tool description embeddings loaded")
109
-
110
  def rag_infer(self, query, top_k=5):
111
  return self.rag_model.rag_infer(query, top_k)
112
 
@@ -136,29 +136,6 @@ class TxAgent:
136
  logger.debug("Conversation initialized with %d messages", len(conversation))
137
  return conversation
138
 
139
- def tool_RAG(self, message=None,
140
- picked_tool_names=None,
141
- existing_tools_prompt=[],
142
- rag_num=0,
143
- return_call_result=False):
144
- if not self.enable_rag:
145
- return []
146
- extra_factor = 10
147
- if picked_tool_names is None:
148
- assert picked_tool_names is not None or message is not None
149
- picked_tool_names = self.rag_infer(
150
- message, top_k=rag_num * extra_factor)
151
-
152
- picked_tool_names_no_special = [tool for tool in picked_tool_names if tool not in self.special_tools_name]
153
- picked_tool_names = picked_tool_names_no_special[:rag_num]
154
-
155
- picked_tools = self.tooluniverse.get_tool_by_name(picked_tool_names)
156
- picked_tools_prompt = self.tooluniverse.prepare_tool_prompts(picked_tools)
157
- logger.debug("Retrieved %d tools via RAG", len(picked_tools_prompt))
158
- if return_call_result:
159
- return picked_tools_prompt, picked_tool_names
160
- return picked_tools_prompt
161
-
162
  def add_special_tools(self, tools, call_agent=False):
163
  if self.enable_finish:
164
  tools.append(self.tooluniverse.get_one_tool_by_one_name('Finish', return_prompt=True))
@@ -168,11 +145,6 @@ class TxAgent:
168
  logger.debug("CallAgent tool added")
169
  return tools
170
 
171
- def add_finish_tools(self, tools):
172
- tools.append(self.tooluniverse.get_one_tool_by_one_name('Finish', return_prompt=True))
173
- logger.debug("Finish tool added")
174
- return tools
175
-
176
  def set_system_prompt(self, conversation, sys_prompt):
177
  if not conversation:
178
  conversation.append({"role": "system", "content": sys_prompt})
@@ -180,246 +152,12 @@ class TxAgent:
180
  conversation[0] = {"role": "system", "content": sys_prompt}
181
  return conversation
182
 
183
- def run_function_call(self, fcall_str,
184
- return_message=False,
185
- existing_tools_prompt=None,
186
- message_for_call_agent=None,
187
- call_agent=False,
188
- call_agent_level=None,
189
- temperature=None):
190
- try:
191
- function_call_json, message = self.tooluniverse.extract_function_call_json(
192
- fcall_str, return_message=return_message, verbose=False)
193
- except Exception as e:
194
- logger.error("Tool call parsing failed: %s", e)
195
- function_call_json = []
196
- message = fcall_str
197
-
198
- call_results = []
199
- special_tool_call = ''
200
- if function_call_json:
201
- if isinstance(function_call_json, list):
202
- for i in range(len(function_call_json)):
203
- logger.info("Tool Call: %s", function_call_json[i])
204
- if function_call_json[i]["name"] == 'Finish':
205
- special_tool_call = 'Finish'
206
- break
207
- elif function_call_json[i]["name"] == 'CallAgent':
208
- if call_agent_level < 2 and call_agent:
209
- solution_plan = function_call_json[i]['arguments']['solution']
210
- full_message = (
211
- message_for_call_agent +
212
- "\nYou must follow the following plan to answer the question: " +
213
- str(solution_plan)
214
- )
215
- call_result = self.run_multistep_agent(
216
- full_message, temperature=temperature,
217
- max_new_tokens=512, max_token=131072,
218
- call_agent=False, call_agent_level=call_agent_level)
219
- if call_result is None:
220
- call_result = "⚠️ No content returned from sub-agent."
221
- else:
222
- call_result = call_result.split('[FinalAnswer]')[-1].strip()
223
- else:
224
- call_result = "Error: CallAgent disabled."
225
- else:
226
- call_result = self.tooluniverse.run_one_function(function_call_json[i])
227
- call_id = self.tooluniverse.call_id_gen()
228
- function_call_json[i]["call_id"] = call_id
229
- logger.info("Tool Call Result: %s", call_result)
230
- call_results.append({
231
- "role": "tool",
232
- "content": json.dumps({"tool_name": function_call_json[i]["name"], "content": call_result, "call_id": call_id})
233
- })
234
- else:
235
- call_results.append({
236
- "role": "tool",
237
- "content": json.dumps({"content": "Invalid or no function call detected."})
238
- })
239
-
240
- revised_messages = [{
241
- "role": "assistant",
242
- "content": message.strip(),
243
- "tool_calls": json.dumps(function_call_json)
244
- }] + call_results
245
- return revised_messages, existing_tools_prompt, special_tool_call
246
-
247
- def run_function_call_stream(self, fcall_str,
248
- return_message=False,
249
- existing_tools_prompt=None,
250
- message_for_call_agent=None,
251
- call_agent=False,
252
- call_agent_level=None,
253
- temperature=None,
254
- return_gradio_history=True):
255
- try:
256
- function_call_json, message = self.tooluniverse.extract_function_call_json(
257
- fcall_str, return_message=return_message, verbose=False)
258
- except Exception as e:
259
- logger.error("Tool call parsing failed: %s", e)
260
- function_call_json = []
261
- message = fcall_str
262
-
263
- call_results = []
264
- special_tool_call = ''
265
- if return_gradio_history:
266
- gradio_history = []
267
- if function_call_json:
268
- if isinstance(function_call_json, list):
269
- for i in range(len(function_call_json)):
270
- if function_call_json[i]["name"] == 'Finish':
271
- special_tool_call = 'Finish'
272
- break
273
- elif function_call_json[i]["name"] == 'DirectResponse':
274
- call_result = function_call_json[i]['arguments']['respose']
275
- special_tool_call = 'DirectResponse'
276
- elif function_call_json[i]["name"] == 'RequireClarification':
277
- call_result = function_call_json[i]['arguments']['unclear_question']
278
- special_tool_call = 'RequireClarification'
279
- elif function_call_json[i]["name"] == 'CallAgent':
280
- if call_agent_level < 2 and call_agent:
281
- solution_plan = function_call_json[i]['arguments']['solution']
282
- full_message = (
283
- message_for_call_agent +
284
- "\nYou must follow the following plan to answer the question: " +
285
- str(solution_plan)
286
- )
287
- sub_agent_task = "Sub TxAgent plan: " + str(solution_plan)
288
- call_result = yield from self.run_gradio_chat(
289
- full_message, history=[], temperature=temperature,
290
- max_new_tokens=512, max_token=131072,
291
- call_agent=False, call_agent_level=call_agent_level,
292
- conversation=None, sub_agent_task=sub_agent_task)
293
- if call_result is not None and isinstance(call_result, str):
294
- call_result = call_result.split('[FinalAnswer]')[-1]
295
- else:
296
- call_result = "⚠️ No content returned from sub-agent."
297
- else:
298
- call_result = "Error: CallAgent disabled."
299
- else:
300
- call_result = self.tooluniverse.run_one_function(function_call_json[i])
301
- call_id = self.tooluniverse.call_id_gen()
302
- function_call_json[i]["call_id"] = call_id
303
- call_results.append({
304
- "role": "tool",
305
- "content": json.dumps({"tool_name": function_call_json[i]["name"], "content": call_result, "call_id": call_id})
306
- })
307
- if return_gradio_history and function_call_json[i]["name"] != 'Finish':
308
- metadata = {"title": f"🧰 {function_call_json[i]['name']}", "log": str(function_call_json[i]['arguments'])}
309
- gradio_history.append(ChatMessage(role="assistant", content=str(call_result), metadata=metadata))
310
- else:
311
- call_results.append({
312
- "role": "tool",
313
- "content": json.dumps({"content": "Invalid or no function call detected."})
314
- })
315
-
316
- revised_messages = [{
317
- "role": "assistant",
318
- "content": message.strip(),
319
- "tool_calls": json.dumps(function_call_json)
320
- }] + call_results
321
- if return_gradio_history:
322
- return revised_messages, existing_tools_prompt, special_tool_call, gradio_history
323
- return revised_messages, existing_tools_prompt, special_tool_call
324
-
325
- def get_answer_based_on_unfinished_reasoning(self, conversation, temperature, max_new_tokens, max_token, outputs=None):
326
- if conversation[-1]['role'] == 'assistant':
327
- conversation.append(
328
- {'role': 'tool', 'content': 'Errors occurred during function call; provide final answer with current information.'})
329
- finish_tools_prompt = self.add_finish_tools([])
330
- last_outputs_str = self.llm_infer(
331
- messages=conversation,
332
- temperature=temperature,
333
- tools=finish_tools_prompt,
334
- output_begin_string='[FinalAnswer]',
335
- skip_special_tokens=True,
336
- max_new_tokens=max_new_tokens,
337
- max_token=max_token)
338
- logger.info("Unfinished reasoning answer: %s", last_outputs_str[:100])
339
- return last_outputs_str
340
-
341
- def run_multistep_agent(self, message: str,
342
- temperature: float,
343
- max_new_tokens: int,
344
- max_token: int,
345
- max_round: int = 5,
346
- call_agent=False,
347
- call_agent_level=0):
348
- logger.info("Starting multistep agent for message: %s", message[:100])
349
- picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
350
- call_agent, call_agent_level, message)
351
- conversation = self.initialize_conversation(message)
352
- outputs = []
353
- last_outputs = []
354
- next_round = True
355
- current_round = 0
356
- token_overflow = False
357
- enable_summary = False
358
- last_status = {}
359
-
360
- while next_round and current_round < max_round:
361
- current_round += 1
362
- if len(outputs) > 0:
363
- function_call_messages, picked_tools_prompt, special_tool_call = self.run_function_call(
364
- last_outputs, return_message=True,
365
- existing_tools_prompt=picked_tools_prompt,
366
- message_for_call_agent=message,
367
- call_agent=call_agent,
368
- call_agent_level=call_agent_level,
369
- temperature=temperature)
370
-
371
- if special_tool_call == 'Finish':
372
- next_round = False
373
- conversation.extend(function_call_messages)
374
- content = function_call_messages[0]['content']
375
- if content is None:
376
- return "❌ No content returned after Finish tool call."
377
- return content.split('[FinalAnswer]')[-1]
378
-
379
- if (self.enable_summary or token_overflow) and not call_agent:
380
- enable_summary = True
381
- last_status = self.function_result_summary(
382
- conversation, status=last_status, enable_summary=enable_summary)
383
-
384
- if function_call_messages:
385
- conversation.extend(function_call_messages)
386
- outputs.append(tool_result_format(function_call_messages))
387
- else:
388
- next_round = False
389
- conversation.extend([{"role": "assistant", "content": ''.join(last_outputs)}])
390
- return ''.join(last_outputs).replace("</s>", "")
391
-
392
- last_outputs = []
393
- outputs.append("### TxAgent:\n")
394
- last_outputs_str, token_overflow = self.llm_infer(
395
- messages=conversation,
396
- temperature=temperature,
397
- tools=picked_tools_prompt,
398
- skip_special_tokens=False,
399
- max_new_tokens=2048,
400
- max_token=131072,
401
- check_token_status=True)
402
- if last_outputs_str is None:
403
- logger.warning("Token limit exceeded")
404
- if self.force_finish:
405
- return self.get_answer_based_on_unfinished_reasoning(
406
- conversation, temperature, max_new_tokens, max_token)
407
- return "❌ Token limit exceeded."
408
- last_outputs.append(last_outputs_str)
409
-
410
- if max_round == current_round:
411
- logger.warning("Max rounds exceeded")
412
- if self.force_finish:
413
- return self.get_answer_based_on_unfinished_reasoning(
414
- conversation, temperature, max_new_tokens, max_token)
415
- return None
416
-
417
  def build_logits_processor(self, messages, llm):
418
  logger.warning("Logits processor disabled due to vLLM V1 limitation")
419
  return None
420
 
421
- def llm_infer(self, messages, temperature=0.1, tools=None,
422
- output_begin_string=None, max_new_tokens=512,
423
  max_token=131072, skip_special_tokens=True,
424
  model=None, tokenizer=None, terminators=None,
425
  seed=None, check_token_status=False):
@@ -428,7 +166,7 @@ class TxAgent:
428
 
429
  logits_processor = self.build_logits_processor(messages, model)
430
  sampling_params = SamplingParams(
431
- temperature=temperature if temperature is not None else 0.0,
432
  max_tokens=max_new_tokens,
433
  seed=seed if seed is not None else self.seed,
434
  )
@@ -451,13 +189,9 @@ class TxAgent:
451
  output = model.generate(prompt, sampling_params=sampling_params)
452
  output_text = output[0].outputs[0].text
453
  output_tokens = len(self.tokenizer.encode(output_text, add_special_tokens=False))
454
- logger.debug("Inference output: %s (output tokens: %d)", output_text[:100], output_tokens)
455
- torch.cuda.empty_cache()
456
- gc.collect()
457
- if check_token_status and max_token is not None:
458
- return output_text, token_overflow
459
- return output_text
460
 
 
461
  def run_self_agent(self, message: str,
462
  temperature: float,
463
  max_new_tokens: int,
@@ -492,21 +226,10 @@ class TxAgent:
492
  max_new_tokens: int,
493
  max_token: int):
494
  logger.info("Starting format agent")
495
- if '[FinalAnswer]' in answer:
496
- possible_final_answer = answer.split("[FinalAnswer]")[-1]
497
- elif "\n\n" in answer:
498
- possible_final_answer = answer.split("\n\n")[-1]
499
- else:
500
- possible_final_answer = answer.strip()
501
- if len(possible_final_answer) == 1 and possible_final_answer in ['A', 'B', 'C', 'D', 'E']:
502
- return possible_final_answer
503
- elif len(possible_final_answer) > 1 and possible_final_answer[1] == ':' and possible_final_answer[0] in ['A', 'B', 'C', 'D', 'E']:
504
- return possible_final_answer[0]
505
-
506
  conversation = self.set_system_prompt(
507
  [], "Transform the agent's answer to a single letter: 'A', 'B', 'C', 'D'.")
508
- conversation.append({"role": "user", "content": message +
509
- "\nAgent's answer: " + answer + "\nAnswer (must be a letter):"})
510
  return self.llm_infer(
511
  messages=conversation,
512
  temperature=temperature,
@@ -514,108 +237,17 @@ class TxAgent:
514
  max_new_tokens=max_new_tokens,
515
  max_token=max_token)
516
 
517
- def run_summary_agent(self, thought_calls: str,
518
- function_response: str,
519
- temperature: float,
520
- max_new_tokens: int,
521
- max_token: int):
522
- logger.info("Summarizing tool result")
523
- prompt = f"""Thought and function calls:
524
- {thought_calls}
525
- Function calls' responses:
526
- \"\"\"
527
- {function_response}
528
- \"\"\"
529
- Summarize the function calls' l responses in one sentence with all necessary information.
530
- """
531
- conversation = [{"role": "user", "content": prompt}]
532
- output = self.llm_infer(
533
- messages=conversation,
534
- temperature=temperature,
535
- tools=None,
536
- max_new_tokens=max_new_tokens,
537
- max_token=max_token)
538
- if '[' in output:
539
- output = output.split('[')[0]
540
- return output
541
-
542
- def function_result_summary(self, input_list, status, enable_summary):
543
- if 'tool_call_step' not in status:
544
- status['tool_call_step'] = 0
545
- for idx in range(len(input_list)):
546
- pos_id = len(input_list) - idx - 1
547
- if input_list[pos_id]['role'] == 'assistant' and 'tool_calls' in input_list[pos_id]:
548
- break
549
-
550
- status['step'] = status.get('step', 0) + 1
551
- if not enable_summary:
552
- return status
553
-
554
- status['summarized_index'] = status.get('summarized_index', 0)
555
- status['summarized_step'] = status.get('summarized_step', 0)
556
- status['previous_length'] = status.get('previous_length', 0)
557
- status['history'] = status.get('history', [])
558
-
559
- function_response = ''
560
- idx = status['summarized_index']
561
- this_thought_calls = None
562
-
563
- while idx < len(input_list):
564
- if (self.summary_mode == 'step' and status['summarized_step'] < status['step'] - status['tool_call_step'] - self.summary_skip_last_k) or \
565
- (self.summary_mode == 'length' and status['previous_length'] > self.summary_context_length):
566
- if input_list[idx]['role'] == 'assistant':
567
- if function_response:
568
- status['summarized_step'] += 1
569
- result_summary = self.run_summary_agent(
570
- thought_calls=this_thought_calls,
571
- function_response=function_response,
572
- temperature=0.1,
573
- max_new_tokens=512,
574
- max_token=131072)
575
- input_list.insert(last_call_idx + 1, {'role': 'tool', 'content': result_summary})
576
- status['summarized_index'] = last_call_idx + 2
577
- idx += 1
578
- last_call_idx = idx
579
- this_thought_calls = input_list[idx]['content'] + input_list[idx]['tool_calls']
580
- function_response = ''
581
- elif input_list[idx]['role'] == 'tool' and this_thought_calls is not None:
582
- function_response += input_list[idx]['content']
583
- del input_list[idx]
584
- idx -= 1
585
- else:
586
- break
587
- idx += 1
588
-
589
- if function_response:
590
- status['summarized_step'] += 1
591
- result_summary = self.run_summary_agent(
592
- thought_calls=this_thought_calls,
593
- function_response=function_response,
594
- temperature=0.1,
595
- max_new_tokens=512,
596
- max_token=131072)
597
- tool_calls = json.loads(input_list[last_call_idx]['tool_calls'])
598
- for tool_call in tool_calls:
599
- del tool_call['call_id']
600
- input_list[last_call_idx]['tool_calls'] = json.dumps(tool_calls)
601
- input_list.insert(last_call_idx + 1, {'role': 'tool', 'content': result_summary})
602
- status['summarized_index'] = last_call_idx + 2
603
-
604
- return status
605
-
606
- def update_parameters(self, **kwargs):
607
- updated_attributes = {}
608
- for key, value in kwargs.items():
609
- if hasattr(self, key):
610
- setattr(self, key, value)
611
- updated_attributes[key] = value
612
- logger.info("Updated parameters: %s", updated_attributes)
613
- return updated_attributes
614
 
615
  def run_gradio_chat(self, message: str,
616
  history: list,
617
  temperature: float,
618
- max_new_tokens: int = 2048,
619
  max_token: int = 131072,
620
  call_agent: bool = False,
621
  conversation: gr.State = None,
@@ -624,71 +256,38 @@ Summarize the function calls' l responses in one sentence with all necessary in
624
  call_agent_level: int = 0,
625
  sub_agent_task: str = None,
626
  uploaded_files: list = None):
627
- logger.info("Chat started, message: %s", message[:100])
628
  if not message or len(message.strip()) < 5:
629
  yield "Please provide a valid message or upload files to analyze."
630
  return
631
 
632
- picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
633
- call_agent, call_agent_level, message)
634
- conversation = self.initialize_conversation(
635
- message, conversation, history)
636
- history = []
637
- last_outputs = []
638
639
  next_round = True
640
  current_round = 0
641
- enable_summary = False
642
- last_status = {}
643
- token_overflow = False
644
 
645
  try:
646
  while next_round and current_round < max_round:
647
  current_round += 1
648
- logger.debug("Starting round %d/%d", current_round, max_round)
 
649
  if last_outputs:
650
- function_call_messages, picked_tools_prompt, special_tool_call, current_gradio_history = yield from self.run_function_call_stream(
651
- last_outputs, return_message=True,
652
- existing_tools_prompt=picked_tools_prompt,
653
- message_for_call_agent=message,
654
- call_agent=call_agent,
655
- call_agent_level=call_agent_level,
656
- temperature=temperature)
657
- history.extend(current_gradio_history)
658
659
  if special_tool_call == 'Finish':
660
- logger.info("Finish tool called, ending chat")
661
- yield history
662
- next_round = False
663
- conversation.extend(function_call_messages)
664
- content = function_call_messages[0]['content']
665
- if content:
666
- return content
667
- return "No content returned after Finish tool call."
668
-
669
- elif special_tool_call in ['RequireClarification', 'DirectResponse']:
670
- last_msg = history[-1] if history else ChatMessage(role="assistant", content="Response needed.")
671
- history.append(ChatMessage(role="assistant", content=last_msg.content))
672
- logger.info("Special tool %s called, ending chat", special_tool_call)
673
- yield history
674
- next_round = False
675
- return last_msg.content
676
-
677
- if (self.enable_summary or token_overflow) and not call_agent:
678
- enable_summary = True
679
- last_status = self.function_result_summary(
680
- conversation, status=last_status, enable_summary=enable_summary)
681
-
682
- if function_call_messages:
683
- conversation.extend(function_call_messages)
684
- yield history
685
- else:
686
  next_round = False
687
- conversation.append({"role": "assistant", "content": ''.join(last_outputs)})
688
- logger.info("No function call messages, ending chat")
689
- return ''.join(last_outputs).replace("</s>", "")
690
 
691
- last_outputs = []
692
  last_outputs_str, token_overflow = self.llm_infer(
693
  messages=conversation,
694
  temperature=temperature,
@@ -700,95 +299,103 @@ Summarize the function calls' l responses in one sentence with all necessary in
700
  check_token_status=True)
701
 
702
  if last_outputs_str is None:
703
- logger.warning("Token limit exceeded")
704
  if self.force_finish:
705
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
706
  conversation, temperature, max_new_tokens, max_token)
707
- history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
708
- yield history
709
  return last_outputs_str
710
- error_msg = "Token limit exceeded."
711
- history.append(ChatMessage(role="assistant", content=error_msg))
712
- yield history
713
- return error_msg
714
-
715
- last_thought = last_outputs_str.split("[TOOL_CALLS]")[0]
716
- for msg in history:
717
- if msg.metadata is not None:
718
- msg.metadata['status'] = 'done'
719
-
720
- if '[FinalAnswer]' in last_thought:
721
- parts = last_thought.split('[FinalAnswer]', 1)
722
- final_thought, final_answer = parts if len(parts) == 2 else (last_thought, "")
723
- history.append(ChatMessage(role="assistant", content=final_thought.strip()))
724
- yield history
725
- history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
726
- logger.info("Final answer provided: %s", final_answer[:100])
727
- yield history
728
- next_round = False # Ensure we exit after final answer
729
- return final_answer
730
- else:
731
- history.append(ChatMessage(role="assistant", content=last_thought))
732
- yield history
733
 
734
  last_outputs.append(last_outputs_str)
735
 
736
- if next_round:
737
- if self.force_finish:
738
- last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
739
- conversation, temperature, max_new_tokens, max_token)
740
- parts = last_outputs_str.split('[FinalAnswer]', 1)
741
- final_thought, final_answer = parts if len(parts) == 2 else (last_outputs_str, "")
742
- history.append(ChatMessage(role="assistant", content=final_thought.strip()))
743
- yield history
744
- history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
745
- logger.info("Forced final answer: %s", final_answer[:100])
746
- yield history
747
- return final_answer
748
- else:
749
- error_msg = "Reasoning rounds exceeded limit."
750
- history.append(ChatMessage(role="assistant", content=error_msg))
751
- yield history
752
- return error_msg
753
 
754
  except Exception as e:
755
- logger.error("Exception in run_gradio_chat: %s", e, exc_info=True)
756
- error_msg = f"Error: {e}"
757
- history.append(ChatMessage(role="assistant", content=error_msg))
758
- yield history
759
- if self.force_finish:
760
- last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
761
- conversation, temperature, max_new_tokens, max_token)
762
- parts = last_outputs_str.split('[FinalAnswer]', 1)
763
- final_thought, final_answer = parts if len(parts) == 2 else (last_outputs_str, "")
764
- history.append(ChatMessage(role="assistant", content=final_thought.strip()))
765
- yield history
766
- history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
767
- logger.info("Forced final answer after error: %s", final_answer[:100])
768
- yield history
769
- return final_answer
770
- return error_msg
771
 
772
  def run_gradio_chat_batch(self, messages: List[str],
773
  temperature: float,
774
- max_new_tokens: int = 2048,
775
  max_token: int = 131072,
776
  call_agent: bool = False,
777
  conversation: List = None,
778
  max_round: int = 5,
779
  seed: int = None,
780
  call_agent_level: int = 0):
781
- """Run batch inference for multiple messages."""
782
  logger.info("Starting batch chat for %d messages", len(messages))
783
  batch_results = []
784
-
785
  for message in messages:
786
- # Initialize conversation for each message
787
  conv = self.initialize_conversation(message, conversation, history=None)
788
  picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
789
  call_agent, call_agent_level, message)
790
-
791
- # Run single inference for simplicity (extend for multi-round if needed)
792
  output, token_overflow = self.llm_infer(
793
  messages=conv,
794
  temperature=temperature,
@@ -799,12 +406,12 @@ Summarize the function calls' l responses in one sentence with all necessary in
799
  seed=seed,
800
  check_token_status=True
801
  )
802
-
803
  if output is None:
804
  logger.warning("Token limit exceeded for message: %s", message[:100])
805
- batch_results.append("Token limit exceeded.")
806
  else:
807
  batch_results.append(output)
808
-
809
  logger.info("Batch chat completed for %d messages", len(messages))
810
- return batch_results
 
14
  import torch
15
  import logging
16
 
17
+ # Configure logging
18
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
19
  logger = logging.getLogger("TxAgent")
20
 
 
74
  return f"The model {model_name} is already loaded."
75
  self.model_name = model_name
76
 
77
+ # OPTIMIZED: maximize batching for big GPU
78
  self.model = LLM(
79
  model=self.model_name,
80
  dtype="float16",
81
  max_model_len=131072,
82
+ max_num_batched_tokens=65536, # Bigger batch size
83
+ gpu_memory_utilization=0.95, # Use 95% of GPU
84
  trust_remote_code=True
85
  )
86
  self.chat_template = Template(self.model.get_tokenizer().chat_template)
87
  self.tokenizer = self.model.get_tokenizer()
88
  logger.info(
89
  "Model %s loaded with max_model_len=%d, max_num_batched_tokens=%d, gpu_memory_utilization=%.2f",
90
+ self.model_name, 131072, 65536, 0.95
91
  )
92
  return f"Model {model_name} loaded successfully."
93
 
 
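For reference, the engine settings introduced in this hunk can be exercised on their own before wiring them into TxAgent. A minimal sketch, assuming vLLM is installed; the model id is a placeholder, since the checkpoint TxAgent actually loads is not named in this diff:

```python
# Stand-alone check of the vLLM engine configuration set in this commit.
# "your-org/your-model" is a placeholder, not the checkpoint TxAgent loads.
from vllm import LLM, SamplingParams

llm = LLM(
    model="your-org/your-model",
    dtype="float16",
    max_model_len=131072,           # full 128K context window
    max_num_batched_tokens=65536,   # larger batches for a big GPU
    gpu_memory_utilization=0.95,    # use 95% of GPU memory
    trust_remote_code=True,
)
out = llm.generate("Hello", SamplingParams(temperature=0.0, max_tokens=16))
print(out[0].outputs[0].text)
```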
107
  self.rag_model.load_tool_desc_embedding(self.tooluniverse)
108
  self.rag_model.save_embeddings(cache_path)
109
  logger.debug("Tool description embeddings loaded")
 
110
  def rag_infer(self, query, top_k=5):
111
  return self.rag_model.rag_infer(query, top_k)
112
 
 
136
  logger.debug("Conversation initialized with %d messages", len(conversation))
137
  return conversation
138
 
 
139
  def add_special_tools(self, tools, call_agent=False):
140
  if self.enable_finish:
141
  tools.append(self.tooluniverse.get_one_tool_by_one_name('Finish', return_prompt=True))
 
145
  logger.debug("CallAgent tool added")
146
  return tools
147
 
148
  def set_system_prompt(self, conversation, sys_prompt):
149
  if not conversation:
150
  conversation.append({"role": "system", "content": sys_prompt})
 
152
  conversation[0] = {"role": "system", "content": sys_prompt}
153
  return conversation
154
 
155
  def build_logits_processor(self, messages, llm):
156
  logger.warning("Logits processor disabled due to vLLM V1 limitation")
157
  return None
158
 
159
+ def llm_infer(self, messages, temperature=0.0, tools=None,
160
+ output_begin_string=None, max_new_tokens=4096,
161
  max_token=131072, skip_special_tokens=True,
162
  model=None, tokenizer=None, terminators=None,
163
  seed=None, check_token_status=False):
 
166
 
167
  logits_processor = self.build_logits_processor(messages, model)
168
  sampling_params = SamplingParams(
169
+ temperature=temperature, # Force deterministic
170
  max_tokens=max_new_tokens,
171
  seed=seed if seed is not None else self.seed,
172
  )
 
189
  output = model.generate(prompt, sampling_params=sampling_params)
190
  output_text = output[0].outputs[0].text
191
  output_tokens = len(self.tokenizer.encode(output_text, add_special_tokens=False))
192
+ logger.debug("Inference output (truncated): %s (output tokens: %d)", output_text[:120], output_tokens)
193
 
194
+ return output_text if not check_token_status else (output_text, token_overflow)
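The sampling setup that llm_infer now builds is small enough to reproduce outside the class. A sketch under the assumption that vLLM is available; `default_seed` is illustrative, since the value of `self.seed` is not shown in this diff:

```python
from vllm import SamplingParams

def build_sampling_params(temperature=0.0, max_new_tokens=4096, seed=None, default_seed=42):
    """Mirror of the SamplingParams constructed in llm_infer after this commit:
    temperature passes through (0.0 by default, i.e. greedy decoding),
    max_tokens comes from max_new_tokens, and seed falls back to the agent seed.
    `default_seed` stands in for self.seed, which this diff does not show."""
    return SamplingParams(
        temperature=temperature,
        max_tokens=max_new_tokens,
        seed=seed if seed is not None else default_seed,
    )

params = build_sampling_params()
print(params.temperature, params.max_tokens, params.seed)  # 0.0 4096 42
```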
195
  def run_self_agent(self, message: str,
196
  temperature: float,
197
  max_new_tokens: int,
 
226
  max_new_tokens: int,
227
  max_token: int):
228
  logger.info("Starting format agent")
229
+ possible_final_answer = answer.split("[FinalAnswer]")[-1] if "[FinalAnswer]" in answer else answer.strip()
230
  conversation = self.set_system_prompt(
231
  [], "Transform the agent's answer to a single letter: 'A', 'B', 'C', 'D'.")
232
+ conversation.append({"role": "user", "content": message + "\nAgent's answer: " + possible_final_answer})
 
233
  return self.llm_infer(
234
  messages=conversation,
235
  temperature=temperature,
 
237
  max_new_tokens=max_new_tokens,
238
  max_token=max_token)
239
 
240
+ def build_chat_messages(self, history, user_message):
241
+ conversation = [{"role": "system", "content": self.prompt_multi_step}]
242
+ for item in history:
243
+ conversation.append(item)
244
+ conversation.append({"role": "user", "content": user_message})
245
+ return conversation
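The new build_chat_messages helper is self-contained enough to check in isolation. A sketch with a stand-in system prompt, since self.prompt_multi_step is defined elsewhere and not shown in this diff:

```python
# Stand-alone version of the new build_chat_messages helper, for illustration.
# SYSTEM_PROMPT stands in for self.prompt_multi_step, which this diff does not show.
SYSTEM_PROMPT = "You are TxAgent, a multi-step biomedical reasoning assistant."

def build_chat_messages(history, user_message):
    conversation = [{"role": "system", "content": SYSTEM_PROMPT}]
    conversation.extend(history)
    conversation.append({"role": "user", "content": user_message})
    return conversation

msgs = build_chat_messages(
    [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello."}],
    "Which enzymes metabolize ibuprofen?",
)
print([m["role"] for m in msgs])  # ['system', 'user', 'assistant', 'user']
```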
246
 
247
  def run_gradio_chat(self, message: str,
248
  history: list,
249
  temperature: float,
250
+ max_new_tokens: int = 4096,
251
  max_token: int = 131072,
252
  call_agent: bool = False,
253
  conversation: gr.State = None,
 
256
  call_agent_level: int = 0,
257
  sub_agent_task: str = None,
258
  uploaded_files: list = None):
259
+ logger.info("Chat started, message: %s", message[:120])
260
  if not message or len(message.strip()) < 5:
261
  yield "Please provide a valid message or upload files to analyze."
262
  return
263
 
264
+ picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(call_agent, call_agent_level, message)
265
+ conversation = self.initialize_conversation(message, conversation, history)
 
266
 
267
+ outputs = []
268
+ last_outputs = []
269
+ token_overflow = False
270
  next_round = True
271
  current_round = 0
 
 
 
272
 
273
  try:
274
  while next_round and current_round < max_round:
275
  current_round += 1
276
+ logger.info("Starting round %d/%d", current_round, max_round)
277
+
278
  if last_outputs:
279
+ function_call_messages, picked_tools_prompt, special_tool_call = self.tool_function_handling(
280
+ last_outputs, picked_tools_prompt, message, call_agent, call_agent_level, temperature)
281
 
282
+ conversation.extend(function_call_messages)
283
+ outputs.append(tool_result_format(function_call_messages))
284
  if special_tool_call == 'Finish':
 
285
  next_round = False
286
+ final_content = function_call_messages[0]["content"]
287
+ yield final_content.split('[FinalAnswer]')[-1] if final_content else "⚠️ No content returned."
288
+ return
289
 
290
+ logger.info("Running model inference...")
291
  last_outputs_str, token_overflow = self.llm_infer(
292
  messages=conversation,
293
  temperature=temperature,
 
299
  check_token_status=True)
300
 
301
  if last_outputs_str is None:
 
302
  if self.force_finish:
303
  last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
304
  conversation, temperature, max_new_tokens, max_token)
305
+ yield last_outputs_str
 
306
  return last_outputs_str
307
+ yield "⚠️ Token limit exceeded."
308
+ return
309
 
310
  last_outputs.append(last_outputs_str)
311
 
312
+ if "[FinalAnswer]" in last_outputs_str:
313
+ final_answer = last_outputs_str.split("[FinalAnswer]")[-1].strip()
314
+ logger.info("Final answer provided: %s", final_answer[:100])
315
+ yield final_answer
316
+ return
317
 
318
  except Exception as e:
319
+ logger.error("Exception in run_gradio_chat: %s", e)
320
+ yield f"⚠️ Error occurred: {e}"
321
+ def tool_function_handling(self, last_outputs, picked_tools_prompt, message, call_agent, call_agent_level, temperature):
322
+ try:
323
+ from .utils import extract_function_call_json
324
+ function_call_json, _ = extract_function_call_json(last_outputs[-1], return_message=True)
325
+ except Exception as e:
326
+ logger.warning("Tool call extraction failed: %s", e)
327
+ function_call_json = []
328
+
329
+ function_call_messages = []
330
+ special_tool_call = ''
331
+ if function_call_json:
332
+ for call in function_call_json:
333
+ if call['name'] == 'Finish':
334
+ special_tool_call = 'Finish'
335
+ # Can handle other special tools here
336
+
337
+ return function_call_messages, picked_tools_prompt, special_tool_call
338
+
339
+ def run_multistep_agent(self, message: str,
340
+ temperature: float,
341
+ max_new_tokens: int,
342
+ max_token: int,
343
+ max_round: int = 5,
344
+ call_agent=False,
345
+ call_agent_level=0):
346
+ logger.info("Starting multistep agent for message: %s", message[:100])
347
+ picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(call_agent, call_agent_level, message)
348
+ conversation = self.initialize_conversation(message)
349
+
350
+ outputs = []
351
+ last_outputs = []
352
+ next_round = True
353
+ current_round = 0
354
+
355
+ while next_round and current_round < max_round:
356
+ current_round += 1
357
+ last_outputs_str, token_overflow = self.llm_infer(
358
+ messages=conversation,
359
+ temperature=temperature,
360
+ tools=picked_tools_prompt,
361
+ skip_special_tokens=False,
362
+ max_new_tokens=max_new_tokens,
363
+ max_token=max_token,
364
+ seed=self.seed,
365
+ check_token_status=True)
366
+
367
+ if last_outputs_str is None:
368
+ logger.warning("Token limit exceeded inside multistep agent")
369
+ return "⚠️ Token overflow."
370
+
371
+ outputs.append(last_outputs_str)
372
+
373
+ if "[FinalAnswer]" in last_outputs_str:
374
+ logger.info("Multistep Final Answer Provided")
375
+ return last_outputs_str.split("[FinalAnswer]")[-1]
376
+
377
+ last_outputs = [last_outputs_str]
378
+
379
+ return "⚠️ Max rounds exceeded."
380
 
381
  def run_gradio_chat_batch(self, messages: List[str],
382
  temperature: float,
383
+ max_new_tokens: int = 4096,
384
  max_token: int = 131072,
385
  call_agent: bool = False,
386
  conversation: List = None,
387
  max_round: int = 5,
388
  seed: int = None,
389
  call_agent_level: int = 0):
390
+ """Batch processing optimization."""
391
  logger.info("Starting batch chat for %d messages", len(messages))
392
  batch_results = []
393
+
394
  for message in messages:
 
395
  conv = self.initialize_conversation(message, conversation, history=None)
396
  picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
397
  call_agent, call_agent_level, message)
398
+
 
399
  output, token_overflow = self.llm_infer(
400
  messages=conv,
401
  temperature=temperature,
 
406
  seed=seed,
407
  check_token_status=True
408
  )
409
+
410
  if output is None:
411
  logger.warning("Token limit exceeded for message: %s", message[:100])
412
+ batch_results.append("⚠️ Token limit exceeded.")
413
  else:
414
  batch_results.append(output)
415
+
416
  logger.info("Batch chat completed for %d messages", len(messages))
417
+ return batch_results
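Hedged usage sketch for the batch entry point as it stands after this commit. TxAgent's constructor and model-loading method are not part of this diff, so the helper below assumes an already-initialized agent rather than inventing that API:

```python
from typing import List

def answer_batch(agent, questions: List[str]) -> List[str]:
    """Run TxAgent's batch path over a list of questions.

    `agent` is assumed to be an initialized TxAgent with its model loaded;
    how it is constructed is outside the scope of this diff.
    """
    return agent.run_gradio_chat_batch(
        questions,
        temperature=0.0,      # matches the new llm_infer default
        max_new_tokens=4096,
        max_round=5,
    )

# Example (with an existing `agent`):
# answers = answer_batch(agent, ["What are warfarin's contraindications?"])
```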