"""
Configuration parameters for the Lyrics Analyzer Agent.
This module separates configuration from implementation,
making it easier to modify settings without changing code.
"""
import os
import yaml
from loguru import logger
# Logger configuration
def setup_logger():
    """Set up loguru with a single colorized console sink at INFO level."""
    # Drop loguru's default stderr handler so only our sink receives records.
    logger.remove()
    log_format = (
        "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
        "<level>{level: <8}</level> | <cyan>{message}</cyan>"
    )
    # Records arrive already newline-terminated, hence end="" on print.
    logger.add(lambda msg: print(msg, end=""), level="INFO", format=log_format)
# API configuration
def load_api_keys():
    """Load API keys from environment variables.

    Gemini is the default provider. If GEMINI_API_KEY is unset, logs a
    warning instead of crashing: the previous unconditional assignment
    raised TypeError, because os.environ values must be strings, not None.
    """
    gemini_key = os.getenv("GEMINI_API_KEY")
    if gemini_key:
        # Re-setting is a no-op for this process but keeps the key visible
        # to any child processes that inherit the environment.
        os.environ["GEMINI_API_KEY"] = gemini_key
    else:
        logger.warning("GEMINI_API_KEY is not set; Gemini API calls will fail.")
def get_model_id(provider="gemini"):
    """Get the appropriate model ID based on configuration.

    Args:
        provider: Model provider ('ollama', 'gemini', 'openrouter').

    Returns:
        String with model ID for the specified provider.

    Raises:
        ValueError: If `provider` is not one of the supported names
            (previously an unknown provider silently returned None).
    """
    model_ids = {
        # Local Ollama; earlier alternative: "ollama/gemma3:4b"
        "ollama": "ollama/qwen2.5-coder:7b",
        "gemini": "gemini/gemini-2.0-flash",
        # Other free OpenRouter models tried previously:
        #   google/gemini-2.0-flash-lite-preview-02-05:free
        #   mistralai/mistral-small-3.1-24b-instruct:free
        #   deepseek/deepseek-chat:free, thudm/glm-z1-32b:free
        #   rekaai/reka-flash-3:free, google/gemini-2.5-pro-exp-03-25:free
        "openrouter": "openrouter/google/gemini-2.0-flash-exp:free",
    }
    try:
        return model_ids[provider]
    except KeyError:
        raise ValueError(f"Unknown model provider: {provider!r}") from None
def get_ollama_api_base():
    """Return the base URL of the local Ollama HTTP API."""
    # Ollama's default port on the local machine.
    ollama_url = "http://localhost:11434"
    return ollama_url
# Load prompts from YAML
def load_prompt_templates():
    """Load prompt templates from the YAML file.

    Returns:
        Dict of parsed templates, or an empty dict on failure so the
        application keeps running without prompts.
    """
    # Path is relative to the process working directory — assumes the app
    # is launched from the project root; TODO confirm.
    path = "prompts/prompts_hf.yaml"
    try:
        # Explicit encoding avoids platform-default surprises.
        with open(path, "r", encoding="utf-8") as stream:
            return yaml.safe_load(stream)
    except (FileNotFoundError, yaml.YAMLError) as e:
        # Previously logged "prompts.yaml", which did not match the file
        # actually opened and misled debugging.
        logger.error(f"Error loading {path}: {e}")
        return {}  # Return empty dict to avoid breaking the application
# Tool configuration
# Delay bounds in seconds between search-tool requests — presumably used to
# throttle/jitter calls to the search backend; verify against the consumer
# of this config (not visible in this module).
SEARCH_TOOL_CONFIG = {
    "min_delay": 3.0,
    "max_delay": 7.0
}