Spaces:
Sleeping
Sleeping
File size: 3,182 Bytes
08c62ca 9b5b26a c19d193 bc7ee1f 6aae614 4e257c7 8fe992b 9b5b26a bc7ee1f 08c62ca 5df72d6 9b5b26a 08c62ca 9b5b26a 08c62ca 9b5b26a 8c01ffb 6aae614 bc7ee1f 08c62ca bc7ee1f 13d500a 8c01ffb 9b5b26a 8c01ffb 08c62ca 861422e 08c62ca 8c01ffb 8fe992b 4e257c7 8c01ffb 08c62ca 8fe992b 9b5b26a 08c62ca |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 |
# Standard library
import asyncio
import datetime
import logging
import time

# Third-party
import pytz
import requests
import yaml
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

# Local
from tools.final_answer import FinalAnswerTool
from tools.text_analysis import analyze_text
from Gradio_UI import GradioUI
# Module-level logger; handler/level configuration is left to the host environment.
logger = logging.getLogger(__name__)
# Below is an example of a tool that does nothing. Amaze us with your creativity !
@tool
def my_cutom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep the description / args format below so the tool decorator can parse it,
    # but feel free to modify the tool itself.
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the zone name, then format "now" in that zone.
        zone = pytz.timezone(timezone)
        local_time = datetime.datetime.now(zone).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        # Unknown zone names (and any other failure) are reported back as text
        # so the agent can read the error instead of crashing.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
# Tool the agent uses to return its final answer to the user.
final_answer = FinalAnswerTool()
class RetryingHfApiModel(HfApiModel):
    """HfApiModel that retries chat calls when the API rate-limits (HTTP 429).

    Retries up to ``max_retries`` times, waiting ``retry_delay * (attempt + 1)``
    seconds between attempts (linear backoff). Any non-429 error, or a 429 on
    the final attempt, is re-raised to the caller.
    """

    def __init__(self, max_retries=3, retry_delay=2, **kwargs):
        """
        Args:
            max_retries: Maximum number of attempts before giving up.
            retry_delay: Base delay in seconds; the wait grows linearly
                with each failed attempt.
            **kwargs: Forwarded unchanged to ``HfApiModel``.
        """
        super().__init__(**kwargs)
        self.max_retries = max_retries
        self.retry_delay = retry_delay

    async def async_chat(self, *args, **kwargs):
        """Delegate to ``HfApiModel.async_chat``, retrying on rate-limit errors."""
        for attempt in range(self.max_retries):
            try:
                return await super().async_chat(*args, **kwargs)
            except Exception as e:
                # Retry only when the error looks like an HTTP 429 and we still
                # have attempts left; everything else propagates immediately.
                if "429" in str(e) and attempt < self.max_retries - 1:
                    wait_time = self.retry_delay * (attempt + 1)  # linear backoff
                    logger.warning("Rate limited. Retrying in %s seconds...", wait_time)
                    # BUG FIX: time.sleep() here would block the event loop for
                    # the whole process; await the async sleep instead.
                    await asyncio.sleep(wait_time)
                    continue
                raise  # not a 429, or we're out of retries
# Replace the plain model initialization with our retrying version.
model = RetryingHfApiModel(
    max_tokens=2096,  # NOTE(review): unusual value — 2048 or 4096 may have been intended; confirm
    temperature=0.5,
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    custom_role_conversions=None,
    max_retries=3,  # will try up to 3 times
    retry_delay=2,  # 2s base delay; grows with each retry
)

# Import a text-to-image tool from the Hub.
# NOTE(review): this tool is loaded but never added to the agent's tools list
# below — add it to `tools=[...]` if image generation is wanted.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load prompt templates for the agent. BUG FIX: an explicit encoding avoids
# reading the YAML with a platform-dependent locale default.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[final_answer, analyze_text],  # Added analyze_text tool
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

# Launch the Gradio web UI for the agent.
GradioUI(agent).launch()
|