Update src/txagent/txagent.py
src/txagent/txagent.py  +37 -33
@@ -755,17 +755,17 @@ Generate **one summarized sentence** about "function calls' responses" with nece
         return updated_attributes
 
     def run_gradio_chat(self, message: str,
-
-
-
-
-
-
-
-
-
-
-
+                        history: list,
+                        temperature: float,
+                        max_new_tokens: int,
+                        max_token: int,
+                        call_agent: bool,
+                        conversation: gr.State,
+                        max_round: int = 20,
+                        seed: int = None,
+                        call_agent_level: int = 0,
+                        sub_agent_task: str = None,
+                        uploaded_files: list = None) -> str:
         """
         Generate a streaming response using the loaded model.
         Args:
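For reference, a Gradio event handler could drive the rewritten signature along these lines. This is a minimal sketch, assuming an already-constructed TxAgent instance named agent; the handler name and the sampling values are illustrative, not part of this commit:

    def respond(message, history, conversation):
        # run_gradio_chat is a generator that yields updated chat histories,
        # so the handler streams each partial history to the Chatbot component.
        yield from agent.run_gradio_chat(
            message,
            history=history,
            temperature=0.3,
            max_new_tokens=1024,
            max_token=8192,
            call_agent=False,
            conversation=conversation,
            max_round=20,
        )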
@@ -783,7 +783,7 @@ Generate **one summarized sentence** about "function calls' responses" with nece
             yield "Please provide a valid message or upload files to analyze."
             return "Invalid input."
 
-        if message.startswith("[
+        if message.startswith("[🧰 Tool_RAG") or message.startswith("⚒️"):
             return ""
 
         outputs = []
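The rewritten guard returns early when the incoming message looks like an echoed tool log rather than user input. The same check in isolation (the helper name is a hypothetical for illustration):

    def is_tool_echo(message: str) -> bool:
        # Tool-log messages begin with the Tool_RAG tag or the hammer emoji.
        return message.startswith("[🧰 Tool_RAG") or message.startswith("⚒️")

    assert is_tool_echo("⚒️ Calling tools...")
    assert not is_tool_echo("What are the known interactions of warfarin?")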
@@ -832,15 +832,13 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                         next_round = False
                         conversation.extend(function_call_messages)
                         return function_call_messages[0]['content']
+
                     elif special_tool_call in ['RequireClarification', 'DirectResponse']:
-                        if history
-
-
-
-
-                        else:
-                            yield "I need more information to proceed."
-                            return "Missing data."
+                        last_msg = history[-1] if history else ChatMessage(role="assistant", content="Response needed.")
+                        history.append(ChatMessage(role="assistant", content=last_msg.content))
+                        yield history
+                        next_round = False
+                        return last_msg.content
 
                 if (self.enable_summary or token_overflow) and not call_agent:
                     enable_summary = True
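The new clarification branch reuses the most recent history entry when one exists and otherwise falls back to a canned assistant message. The fallback pattern in isolation (a sketch, assuming gradio's ChatMessage dataclass):

    import gradio as gr

    def last_or_default(history: list) -> gr.ChatMessage:
        # Non-empty history: echo the latest message; empty: supply a default.
        return history[-1] if history else gr.ChatMessage(role="assistant", content="Response needed.")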
@@ -882,11 +880,13 @@ Generate **one summarized sentence** about "function calls' responses" with nece
 
                     if '[FinalAnswer]' in last_thought:
                         parts = last_thought.split('[FinalAnswer]', 1)
-
-
-
+                        if len(parts) == 2:
+                            final_thought, final_answer = parts
+                        else:
+                            final_thought, final_answer = last_thought, ""
+                        history.append(ChatMessage(role="assistant", content=final_thought.strip()))
                         yield history
-                        history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer))
+                        history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
                         yield history
                     else:
                         history.append(ChatMessage(role="assistant", content=last_thought))
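This hunk, like the two matching hunks below for last_outputs_str, guards the tuple unpack after splitting on the [FinalAnswer] marker. Because the branch has already verified the marker is present, str.split(marker, 1) always returns two parts here, so the else arm is defensive. The parsing in isolation (the helper name is a hypothetical):

    def split_final_answer(text: str, marker: str = "[FinalAnswer]"):
        # split(marker, 1) returns [before, after] if the marker occurs, else [text].
        parts = text.split(marker, 1)
        if len(parts) == 2:
            return parts[0].strip(), parts[1].strip()
        return text.strip(), ""

    thought, answer = split_final_answer("reasoning steps...[FinalAnswer]Take with food.")
    assert answer == "Take with food."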
@@ -900,11 +900,13 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                         conversation, temperature, max_new_tokens, max_token)
                     if '[FinalAnswer]' in last_outputs_str:
                         parts = last_outputs_str.split('[FinalAnswer]', 1)
-
-
-
+                        if len(parts) == 2:
+                            final_thought, final_answer = parts
+                        else:
+                            final_thought, final_answer = last_outputs_str, ""
+                        history.append(ChatMessage(role="assistant", content=final_thought.strip()))
                         yield history
-                        history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer))
+                        history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
                         yield history
                     else:
                         history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
@@ -919,11 +921,13 @@ Generate **one summarized sentence** about "function calls' responses" with nece
                         conversation, temperature, max_new_tokens, max_token)
                     if '[FinalAnswer]' in last_outputs_str:
                         parts = last_outputs_str.split('[FinalAnswer]', 1)
-
-
-
+                        if len(parts) == 2:
+                            final_thought, final_answer = parts
+                        else:
+                            final_thought, final_answer = last_outputs_str, ""
+                        history.append(ChatMessage(role="assistant", content=final_thought.strip()))
                         yield history
-                        history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer))
+                        history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
                         yield history
                     else:
-                        yield f"An error occurred: {e}"
+                        yield f"An error occurred: {e}"