Delanoe Pirard committed · Commit 68bd1d5
Parent(s): 56a91ed

Commit message: cookies.txt
Files changed:
- agents/__pycache__/video_analyzer_agent.cpython-311.pyc +0 -0 (binary)
- agents/advanced_validation_agent.py +4 -4
- agents/long_context_management_agent.py +2 -2
- agents/math_agent.py +2 -0
- agents/planner_agent.py +4 -4
- agents/reasoning_agent.py +2 -2
- agents/research_agent.py +4 -2
- agents/synthesis_agent.py +2 -0
- agents/text_analyzer_agent.py +2 -2
- agents/video_analyzer_agent.py +6 -2
- prompts/advanced_validation_agent_prompt.txt +2 -0
- prompts/code_gen_prompt.txt +2 -0
- prompts/figure_interpretation_agent_prompt.txt +1 -0
- prompts/image_analyzer_prompt.txt +1 -0
- prompts/long_context_management_agent_prompt.txt +1 -0
- prompts/planner_agent_prompt.txt +3 -1
- prompts/reasoning_agent_prompt.txt +1 -0
- prompts/text_analyzer_prompt.txt +1 -0
- prompts/video_analyzer_prompt.txt +1 -0
agents/__pycache__/video_analyzer_agent.cpython-311.pyc
CHANGED
Binary files a/agents/__pycache__/video_analyzer_agent.cpython-311.pyc and b/agents/__pycache__/video_analyzer_agent.cpython-311.pyc differ
agents/advanced_validation_agent.py
CHANGED
@@ -53,7 +53,7 @@ def cross_reference_check(claim: str, sources_content: List[Dict[str, str]]) ->

     results = []
     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using cross-referencing LLM: {llm_model}")

         for i, source in enumerate(sources_content):
@@ -138,7 +138,7 @@ def logical_consistency_check(text: str) -> Dict[str, Union[bool, str, List[str]
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using consistency check LLM: {llm_model}")
         response = llm.complete(prompt)

@@ -203,7 +203,7 @@ def bias_detection(text: str, source_context: Optional[str] = None) -> Dict[str,
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using bias detection LLM: {llm_model}")
         response = llm.complete(prompt)

@@ -308,7 +308,7 @@ def initialize_advanced_validation_agent() -> ReActAgent:
         raise ValueError("GEMINI_API_KEY must be set for AdvancedValidationAgent")

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using agent LLM: {agent_llm_model}")

     # Load system prompt
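Every -/+ pair in this file (and in the agent files below) is the same one-character fix: the GoogleGenAI(...) call was missing its closing parenthesis, a SyntaxError that would have prevented each module from importing at all. A minimal sketch of the corrected pattern, assuming the llama-index-llms-google-genai integration these files use; the make_llm helper name is illustrative and not part of this commit:

import logging
import os

from llama_index.llms.google_genai import GoogleGenAI  # integration assumed from the diff's usage

logger = logging.getLogger(__name__)


def make_llm(model: str = "gemini-2.5-pro-preview-03-25") -> GoogleGenAI:
    """Build the low-temperature Gemini client each agent constructs inline."""
    gemini_api_key = os.environ["GEMINI_API_KEY"]  # the agents raise if this is unset
    # The repeated fix in this commit: the call now ends with ')' so the module parses.
    llm = GoogleGenAI(api_key=gemini_api_key, model=model, temperature=0.05)
    logger.info("Using LLM model: %s", model)
    return llm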
agents/long_context_management_agent.py
CHANGED
@@ -135,7 +135,7 @@ def summarize_long_context(detail_level: Literal["brief", "standard", "detailed"
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using summarization LLM: {llm_model}")
         response = llm.complete(prompt)
         summary = response.text.strip()
@@ -315,7 +315,7 @@ def initialize_long_context_management_agent() -> ReActAgent:
         raise ValueError("GEMINI_API_KEY must be set for LongContextManagementAgent")

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using agent LLM: {agent_llm_model}")
         Settings.llm = llm  # Set default LLM for LlamaIndex components used by tools

agents/math_agent.py
CHANGED
@@ -668,6 +668,8 @@ def initialize_math_agent() -> ReActAgent:
     - Clearly state which tool you are using and why.
     - Handle potential errors gracefully and report them if they prevent finding a solution.
     - Pay close attention to input formats required by each tool (e.g., lists for vectors/matrices, strings for symbolic expressions).
+
+    If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
     """

     agent = ReActAgent(
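The other half of the commit starts here: the same [CONTINUE] continuation instruction is appended to this system prompt and to the prompts in synthesis_agent.py, research_agent.py, video_analyzer_agent.py, and the prompts/*.txt files below. The prompt only asks the model to emit the marker; reassembling the pieces is left to the caller. A minimal caller-side sketch under that assumption — the helper name, the generic complete callable, and the round cap are illustrative, and a real caller would send "continue" through a chat interface that retains history:

MARKER = "[CONTINUE]"


def complete_with_continuations(complete, prompt: str, max_rounds: int = 10) -> str:
    """Call `complete` and keep sending 'continue' while replies end in [CONTINUE]."""
    parts = []
    text = complete(prompt)
    for _ in range(max_rounds):
        stripped = text.rstrip()
        if not stripped.endswith(MARKER):
            parts.append(text)
            break
        parts.append(stripped[: -len(MARKER)])  # keep the content, drop the marker
        text = complete("continue")  # the follow-up the prompt addition promises
    return "".join(parts)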
agents/planner_agent.py
CHANGED
@@ -57,7 +57,7 @@ def plan(objective: str) -> List[str]:
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using planning LLM: {planner_llm_model}")
         response = llm.complete(input_prompt)

@@ -131,7 +131,7 @@ def synthesize_and_report(results: List[Dict[str, str]]) -> str:
     """

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using synthesis LLM: {synthesizer_llm_model}")
         response = llm.complete(input_prompt)
         logger.info("Synthesis successful.")
@@ -180,7 +180,7 @@ def answer_question(question: str) -> str:
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using answer LLM: {model_name}")
         response = llm.complete(assistant_prompt)
         logger.info("Answer generated successfully.")
@@ -232,7 +232,7 @@ def initialize_planner_agent() -> ReActAgent:
         raise ValueError("GEMINI_API_KEY must be set for PlannerAgent")

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using agent LLM: {agent_llm_model}")

     # Load system prompt
agents/reasoning_agent.py
CHANGED
@@ -124,7 +124,7 @@ def answer_question(question: str) -> str:
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using answer LLM: {model_name}")
         response = llm.complete(assistant_prompt)
         logger.info("Answer generated successfully.")
@@ -167,7 +167,7 @@ def initialize_reasoning_agent() -> ReActAgent:
         raise ValueError("GEMINI_API_KEY must be set for ReasoningAgent")

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using agent LLM: {agent_llm_model}")

     # Load system prompt
agents/research_agent.py
CHANGED
@@ -571,7 +571,7 @@ async def answer_question(ctx: Context, question: str) -> str:
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using answer LLM: {model_name}")
         response = llm.complete(assistant_prompt)
         logger.info("Answer generated successfully.")
@@ -640,7 +640,7 @@ class ResearchAgentInitializer:
             logger.error("GEMINI_API_KEY not found for ResearchAgent LLM.")
             raise ValueError("GEMINI_API_KEY must be set for ResearchAgent")
         try:
-            self.llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+            self.llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
             logger.info(f"ResearchAgent LLM initialized: {agent_llm_model}")
         except Exception as e:
             logger.error(f"Failed to initialize ResearchAgent LLM: {e}", exc_info=True)
@@ -1006,6 +1006,8 @@ class ResearchAgentInitializer:
     - reasoning_agent: multi‑hop logical reasoning.

     Do not delegate to any agent outside this list.
+
+    If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
     """

     agent = ReActAgent(
agents/synthesis_agent.py
CHANGED
@@ -109,6 +109,8 @@ Allowed targets when more work required:
 • research_agent – missing data
 • reasoning_agent – reconcile complex logic
 • long_context_management_agent – compress oversized context before answer
+
+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
 """

 # -----------------------------------------------------------------------------
agents/text_analyzer_agent.py
CHANGED
@@ -62,7 +62,7 @@ def summarize_text(text: str, max_length: int = 150, min_length: int = 30) -> st
     )

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using summarization LLM: {summarizer_llm_model}")
         response = llm.complete(prompt)
         summary = response.text.strip()
@@ -197,7 +197,7 @@ def initialize_text_analyzer_agent() -> ReActAgent:
         raise ValueError("GEMINI_API_KEY must be set for TextAnalyzerAgent")

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info(f"Using agent LLM: {agent_llm_model}")

     # Load system prompt
agents/video_analyzer_agent.py
CHANGED
@@ -185,7 +185,7 @@ def download_video_and_analyze(video_url: str) -> str:
         blocks.append(ImageBlock(path=frame_path))


-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info("Using LLM model: %s", llm_model_name)
         response = llm.chat([ChatMessage(role="user", blocks=blocks)])

@@ -331,7 +331,7 @@ def initialize_video_analyzer_agent() -> FunctionAgent:
         raise ValueError("GEMINI_API_KEY must be set")

     try:
-        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05
+        llm = GoogleGenAI(api_key=gemini_api_key, model="gemini-2.5-pro-preview-03-25", temperature=0.05)
         logger.info("Using LLM model: %s", llm_model_name)

         system_prompt = """
@@ -392,6 +392,10 @@ def initialize_video_analyzer_agent() -> FunctionAgent:
     - Good: “At 02:15 the speaker shows a slide titled ‘Transformer Architecture’.”
     - Bad: “There is some sort of diagram that maybe explains something about the
       architecture; it might be a transformer but it is hard to tell.”
+
+    If your response exceeds the maximum token limit and cannot be completed in a single reply,
+    please conclude your output with the marker [CONTINUE]. In subsequent interactions,
+    I will prompt you with “continue” to receive the next portion of the response.

     End of prompt.
     """
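For context on the first hunk: download_video_and_analyze sends the extracted video frames to Gemini as image blocks on a single chat message. A stripped-down sketch of that call path, assuming a recent LlamaIndex where ChatMessage accepts content blocks; the analyze_frames helper and the frame-path list are illustrative, and frame extraction itself is elided:

import os

from llama_index.core.llms import ChatMessage, ImageBlock, TextBlock
from llama_index.llms.google_genai import GoogleGenAI


def analyze_frames(frame_paths: list[str], question: str) -> str:
    """Send sampled video frames plus a question to Gemini in one chat turn."""
    llm = GoogleGenAI(
        api_key=os.environ["GEMINI_API_KEY"],
        model="gemini-2.5-pro-preview-03-25",
        temperature=0.05,
    )
    blocks = [TextBlock(text=question)]
    blocks += [ImageBlock(path=p) for p in frame_paths]  # one block per frame
    response = llm.chat([ChatMessage(role="user", blocks=blocks)])
    return str(response)  # the formatted assistant reply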
prompts/advanced_validation_agent_prompt.txt
CHANGED
@@ -29,3 +29,5 @@ You are AdvancedValidationAgent, a specialized agent focused on rigorously evalu
 * Prioritize accuracy and objectivity in your assessment.
 * Handoff to `research_agent` if external web searching is required for fact-checking beyond provided sources.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
+
prompts/code_gen_prompt.txt
CHANGED
@@ -49,6 +49,8 @@ Notes:
 youtube-transcript-api>=1.0.3,
 yt-dlp>=2025.3.31

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
+
 Prompt: {prompt}

 Code:
prompts/figure_interpretation_agent_prompt.txt
CHANGED
@@ -27,3 +27,4 @@ You are FigureInterpretationAgent, a specialized agent designed to analyze and i
 * Base interpretations strictly on the visual information present in the image.
 * Requires multimodal input capabilities to process the image file.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
prompts/image_analyzer_prompt.txt
CHANGED
@@ -67,3 +67,4 @@ You are ImageAnalyzerAgent, an expert in cold, factual visual analysis. Your sol
 By adhering to these instructions, ensure your visual analysis is cold, factual, comprehensive, and
 completely devoid of subjectivity before handing off.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
prompts/long_context_management_agent_prompt.txt
CHANGED
@@ -26,3 +26,4 @@ You are LongContextManagementAgent, a specialized agent responsible for handling
 * Handle potentially very large inputs efficiently (consider chunking, indexing).
 * Clearly indicate if requested information cannot be found within the provided context.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
prompts/planner_agent_prompt.txt
CHANGED
@@ -40,4 +40,6 @@ Only the following agents are available: **code_agent**, **research_agent**, **m
 Do **not** invoke any other agents (e.g., **chess_agent**, **educate_agent**, **game_agent**, etc.).

 **Finalize**
-After all sub-questions have been addressed, by hand-off or self-answer, and the plan has passed **advanced_validation_agent**, compile and present the ultimate, coherent solution using the `answer_question` tool, ensuring your final response follows the required format and includes your chain of thought.
+After all sub-questions have been addressed, by hand-off or self-answer, and the plan has passed **advanced_validation_agent**, compile and present the ultimate, coherent solution using the `answer_question` tool, ensuring your final response follows the required format and includes your chain of thought.
+
+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
prompts/reasoning_agent_prompt.txt
CHANGED
@@ -21,3 +21,4 @@ You are **ReasoningAgent**, an advanced cognitive engine specialized in rigorous
 - No direct access to external data sources or the internet; all inference happens via the provided tools.
 - Do not skip any step: reasoning → planning → validation → (if approved) final answer.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
prompts/text_analyzer_prompt.txt
CHANGED
@@ -41,3 +41,4 @@ You are TextAnalyzerAgent, an expert text‐analysis assistant. On each request

 Follow this Thought→Action→Observation→… cycle rigorously to produce consistent, reliable analyses.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.
prompts/video_analyzer_prompt.txt
CHANGED
@@ -83,3 +83,4 @@ You are **VideoAnalyzerAgent**, an expert in cold, factual **audiovisual** analy

 By adhering to these instructions, ensure your audiovisual analysis is cold, factual, comprehensive, and completely devoid of subjectivity before handing off.

+If your response exceeds the maximum token limit and cannot be completed in a single reply, please conclude your output with the marker [CONTINUE]. In subsequent interactions, I will prompt you with “continue” to receive the next portion of the response.