import os
from smolagents import LiteLLMModel
from agents.single_agent import create_single_agent
from loguru import logger
from config import get_ollama_api_base, setup_logger, load_api_keys, get_model_id

setup_logger()
load_api_keys()

# Set environment variables for API keys if needed
os.environ["GEMINI_API_KEY"] = str(os.getenv("GEMINI_API_KEY"))
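# Minimal guard (an assumption, not part of the original script): warn early if the
# key is missing, since str(os.getenv(...)) would otherwise store the literal "None".
if not os.getenv("GEMINI_API_KEY"):
    logger.warning("GEMINI_API_KEY is not set; Gemini-backed models will fail to authenticate.")
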
use_local = False
model_id = get_model_id(provider='openrouter')
logger.info(f"Initializing with model: {model_id}")

# Initialize the LLM model based on the configuration
if use_local:
    # When using Ollama, the API base URL must be specified
    api_base = get_ollama_api_base()
    logger.info(f"Using Ollama API base: {api_base}")
    model = LiteLLMModel(model_id=model_id, api_base=api_base)
else:
    model = LiteLLMModel(model_id=model_id)

# If the agent does not answer, the model may be overloaded; use another model or
# the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
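# Hedged sketch (assumption, not verified against this endpoint): litellm's Hugging Face
# provider can target a dedicated endpoint by combining a "huggingface/<model>" model_id
# with api_base, e.g.:
# model = LiteLLMModel(
#     model_id="huggingface/Qwen/Qwen2.5-Coder-32B-Instruct",
#     api_base="https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud",
# )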

# Song to analyze (hardcoded rather than prompted from the user)
song_data = "Ascending - Unna notte che non passa"

agent = create_single_agent(model)

prompt = f"""1. Find and extract the lyrics of the song: {song_data}.
2. Perform a deep lyrics analysis and add a translation if the song is not in English.
3. Return the full lyrics and analysis results in a pretty, human-readable format.
"""

# Agent execution
agent.run(prompt)
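
# Optional follow-up (assumption, not in the original script): agent.run() returns the
# final answer, so it could be captured for further processing instead of discarded:
# result = agent.run(prompt)
# logger.info(result)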