File size: 5,899 Bytes
ac6a4ef |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
"""Define the configurable parameters for the agent supervisor system."""
from __future__ import annotations
from dataclasses import dataclass, field, fields
from typing import Annotated
from langchain_core.runnables import ensure_config
from langgraph.config import get_config
from react_agent import prompts
@dataclass(kw_only=True)
class Configuration:
    """The configuration for the agent supervisor system.

    Every field is declarative: a string prompt or a ``provider/model_name``
    model identifier, plus a few execution limits. Field ``metadata``
    descriptions surface in LangGraph Studio's configuration UI.
    Instances are normally built via :meth:`from_context`, which pulls
    overrides out of the ambient LangGraph run configuration.
    """

    # --- Supervisor configuration ---
    # Guides how the supervisor delegates tasks to worker agents.
    supervisor_prompt: str = field(
        default=prompts.SUPERVISOR_PROMPT,
        metadata={
            "description": "The system prompt for the supervisor agent. "
            "This prompt guides how the supervisor delegates tasks to worker agents."
        },
    )

    # --- Planner configuration ---
    # Guides how the planner creates structured plans.
    planner_prompt: str = field(
        default=prompts.PLANNER_PROMPT,
        metadata={
            "description": "The system prompt for the planner agent. "
            "This prompt guides how the planner creates structured plans."
        },
    )

    # --- Critic configuration ---
    # Guides how the critic evaluates answers.
    critic_prompt: str = field(
        default=prompts.CRITIC_PROMPT,
        metadata={
            "description": "The system prompt for the critic agent. "
            "This prompt guides how the critic evaluates answers."
        },
    )

    # --- Worker agents configuration ---
    # Defines the researcher's capabilities and limitations.
    researcher_prompt: str = field(
        default=prompts.RESEARCHER_PROMPT,
        metadata={
            "description": "The system prompt for the researcher agent. "
            "This prompt defines the researcher's capabilities and limitations."
        },
    )
    # Defines the coder's capabilities and approach to programming tasks.
    coder_prompt: str = field(
        default=prompts.CODER_PROMPT,
        metadata={
            "description": "The system prompt for the coder agent. "
            "This prompt defines the coder's capabilities and approach to programming tasks."
        },
    )

    # --- Shared configuration ---
    # Legacy prompt, used only when running in non-supervisor mode.
    system_prompt: str = field(
        default=prompts.SYSTEM_PROMPT,
        metadata={
            "description": "Legacy system prompt for backward compatibility. "
            "This prompt is used when running the agent in non-supervisor mode."
        },
    )

    # --- LLM configuration ---
    # The Annotated metadata ({"kind": "llm"}) marks these fields as model
    # selectors for LangGraph Studio's configuration UI.

    # Default model, kept for backward compatibility with single-agent mode.
    model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="openai/gpt-4o-mini",
        metadata={
            "description": "The default large language model used by the agents (provider/model_name)."
        },
    )
    # Model for the researcher (information gathering).
    # NOTE(review): an earlier comment said "use powerful model", but the
    # default here is gpt-4o-mini — confirm which is intended.
    researcher_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="openai/gpt-4o-mini",
        metadata={
            "description": "The model used by the researcher agent for gathering information (provider/model_name)."
        },
    )
    # Model for the coder (code execution) — defaults to Claude 3.5 Sonnet.
    coder_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="anthropic/claude-3-5-sonnet-20240620",
        metadata={
            "description": "The lightweight reasoning model used by the planner, supervisor, and critic (provider/model_name)."
            if False
            else "The model used by the coder agent for programming tasks (provider/model_name)."
        },
    )
    # Model for lightweight reasoning/planning tasks.
    planner_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="google_genai/gemini-1.5-flash",
        metadata={
            "description": "The lightweight reasoning model used by the planner, supervisor, and critic (provider/model_name)."
        },
    )
    # Models for the supervisor (routing) and critic (evaluation).
    # NOTE(review): these are independent fields — nothing "points" at
    # planner_model, and the defaults differ (gemini flash for the supervisor
    # vs gpt-4o-mini for the critic). The original comment claimed all three
    # shared planner_model; confirm whether the critic default is intentional.
    supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="google_genai/gemini-1.5-flash",
        metadata={
            "description": "The model used by the supervisor for routing (provider/model_name)."
        },
    )
    critic_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="openai/gpt-4o-mini",
        metadata={
            "description": "The model used by the critic for evaluation (provider/model_name)."
        },
    )
    # Model for final answer generation — Claude for precise formatting.
    final_answer_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
        default="anthropic/claude-3-5-sonnet-20240620",
        metadata={
            "description": "The model used for generating the final answers in GAIA benchmark format (provider/model_name)."
        },
    )

    # --- Tool configuration ---
    max_search_results: int = field(
        default=5,
        metadata={
            "description": "The maximum number of search results to return."
        },
    )

    # --- Execution configuration ---
    # Hard cap on LangGraph recursion depth for a single run.
    recursion_limit: int = field(
        default=50,
        metadata={
            "description": "Maximum number of recursion steps allowed in the LangGraph execution."
        },
    )
    # Supervisor-loop iteration cap, guarding against infinite delegate loops.
    max_iterations: int = field(
        default=12,
        metadata={
            "description": "Maximum number of iterations allowed to prevent infinite loops."
        },
    )
    # Fallback: let the agent extract an answer from context when formatting fails.
    allow_agent_to_extract_answers: bool = field(
        default=True,
        metadata={
            "description": "Whether to allow the agent to extract answers from context when formatting fails."
        },
    )

    @classmethod
    def from_context(cls) -> Configuration:
        """Build a Configuration from the current LangGraph run context.

        Reads the ambient config via ``get_config()`` (falling back to an
        empty config when called outside a LangGraph run), then constructs
        an instance from the ``configurable`` mapping, silently ignoring
        any keys that are not init-able fields of this dataclass.

        Returns:
            Configuration: an instance with context overrides applied and
            dataclass defaults for everything else.
        """
        try:
            config = get_config()
        except RuntimeError:
            # No active LangGraph run context; use defaults.
            config = None
        config = ensure_config(config)
        configurable = config.get("configurable") or {}
        _fields = {f.name for f in fields(cls) if f.init}
        return cls(**{k: v for k, v in configurable.items() if k in _fields})
|