import os
import logging
from huggingface_hub import HfApi
from app.core.cache import cache_config
# Module-level logger used for the startup diagnostics in this config module.
logger = logging.getLogger(__name__)

# Organization or user who owns the datasets on the Hugging Face Hub.
HF_ORGANIZATION = "stacklok"

# Read the Hugging Face access token straight from the environment.
HF_TOKEN = os.getenv("HF_TOKEN")

if not HF_TOKEN:
    # A missing (or empty) token is non-fatal; only warn about reduced features.
    logger.warning(
        "HF_TOKEN not found in environment variables. Some features may be limited."
    )

# Shared Hub API client; token may be None, which means anonymous access.
API = HfApi(token=HF_TOKEN)
# Repository configuration: Hub dataset repos used by the leaderboard.
QUEUE_REPO = f"{HF_ORGANIZATION}/llm-security-leaderboard-requests"
AGGREGATED_REPO = f"{HF_ORGANIZATION}/llm-security-leaderboard-contents"
VOTES_REPO = f"{HF_ORGANIZATION}/llm-security-leaderboard-votes"
OFFICIAL_PROVIDERS_REPO = "open-llm-leaderboard/official-providers"

# Log the resolved repo names at import time to aid deployment debugging.
# Lazy %-style arguments defer interpolation until the record is emitted,
# which is the idiomatic logging pattern (and free when INFO is disabled).
logger.info("QUEUE_REPO: %s", QUEUE_REPO)
logger.info("AGGREGATED_REPO: %s", AGGREGATED_REPO)
logger.info("VOTES_REPO: %s", VOTES_REPO)
logger.info("OFFICIAL_PROVIDERS_REPO: %s", OFFICIAL_PROVIDERS_REPO)

# File paths resolved from the shared cache configuration.
VOTES_PATH = cache_config.votes_file
EVAL_REQUESTS_PATH = cache_config.eval_requests_file
MODEL_CACHE_DIR = cache_config.models_cache