import logging
import os
import uuid

import litellm

from graph import agent_graph
# Configure logging
logging.basicConfig(level=logging.INFO) # Default to INFO level
logger = logging.getLogger(__name__)
# Enable LiteLLM debug logging only if the environment variable is set
if os.getenv("LITELLM_DEBUG", "false").lower() == "true":
    litellm.set_verbose = True
    logger.setLevel(logging.DEBUG)
else:
    litellm.set_verbose = False
    logger.setLevel(logging.INFO)

class AgentRunner:
    """Runner class for the code agent."""

    def __init__(self):
        """Initialize the agent runner with the compiled agent graph."""
        logger.info("Initializing AgentRunner")
        self.graph = agent_graph
        self.last_state = None  # Store the last state for testing/debugging
    def __call__(self, question: str) -> str:
        """Process a question through the agent graph and return the answer.

        Args:
            question: The question to process

        Returns:
            str: The agent's response
        """
        try:
            logger.info(f"Processing question: {question}")
            initial_state = {
                "question": question,
                "messages": [],
                "answer": None,
                "step_logs": [],
                "is_complete": False,  # Initialize is_complete
                "step_count": 0,  # Initialize step_count
            }
            # Generate a unique thread_id for this interaction
            thread_id = str(uuid.uuid4())
            config = {"configurable": {"thread_id": thread_id}}
            final_state = self.graph.invoke(initial_state, config)
            self.last_state = final_state  # Store the final state
            return final_state.get("answer", "No answer generated")
        except Exception as e:
            logger.error(f"Error processing question: {str(e)}")
            raise
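
# Example usage sketch: assumes the compiled `agent_graph` from graph.py is
# importable and that any model credentials it needs are configured in the
# environment; the question below is only a placeholder.
if __name__ == "__main__":
    runner = AgentRunner()
    print(runner("What is the capital of France?"))
    # The final graph state is retained on the runner for debugging.
    print(runner.last_state)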