eleftherias committed on
Commit
5e95327
·
1 Parent(s): 68a05b7

Use requests package everywhere

Browse files
backend/app/config/base.py CHANGED
@@ -36,5 +36,5 @@ VOTES_CACHE = CACHE_ROOT / "votes"
36
  EVAL_CACHE = CACHE_ROOT / "eval-queue"
37
 
38
  # Repository configuration
39
- QUEUE_REPO = f"{HF_ORGANIZATION}/requests"
40
  EVAL_REQUESTS_PATH = EVAL_CACHE / "eval_requests.jsonl"
 
36
  EVAL_CACHE = CACHE_ROOT / "eval-queue"
37
 
38
  # Repository configuration
39
+ QUEUE_REPO = f"{HF_ORGANIZATION}/llm-security-leaderboard-requests"
40
  EVAL_REQUESTS_PATH = EVAL_CACHE / "eval_requests.jsonl"
backend/utils/analyze_prod_datasets.py CHANGED
@@ -59,7 +59,7 @@ def analyze_dataset(repo_id: str) -> Dict[str, Any]:
59
  continue
60
 
61
  # Special handling for requests dataset
62
- if repo_id == f"{HF_ORGANIZATION}/requests":
63
  pending_count = 0
64
  completed_count = 0
65
 
@@ -93,7 +93,7 @@ def analyze_dataset(repo_id: str) -> Dict[str, Any]:
93
  }
94
 
95
  # Add request-specific info if applicable
96
- if repo_id == f"{HF_ORGANIZATION}/requests":
97
  response.update(
98
  {
99
  "pending_requests": pending_count,
@@ -117,7 +117,7 @@ def main():
117
  "id": f"{HF_ORGANIZATION}/llm-security-leaderboard-contents",
118
  "description": "Aggregated results",
119
  },
120
- {"id": f"{HF_ORGANIZATION}/requests", "description": "Evaluation requests"},
121
  {"id": f"{HF_ORGANIZATION}/votes", "description": "User votes"},
122
  {
123
  "id": "open-llm-leaderboard/official-providers",
 
59
  continue
60
 
61
  # Special handling for requests dataset
62
+ if repo_id == f"{HF_ORGANIZATION}/llm-security-leaderboard-requests":
63
  pending_count = 0
64
  completed_count = 0
65
 
 
93
  }
94
 
95
  # Add request-specific info if applicable
96
+ if repo_id == f"{HF_ORGANIZATION}/llm-security-leaderboard-requests":
97
  response.update(
98
  {
99
  "pending_requests": pending_count,
 
117
  "id": f"{HF_ORGANIZATION}/llm-security-leaderboard-contents",
118
  "description": "Aggregated results",
119
  },
120
+ {"id": f"{HF_ORGANIZATION}/llm-security-leaderboard-requests", "description": "Evaluation requests"},
121
  {"id": f"{HF_ORGANIZATION}/votes", "description": "User votes"},
122
  {
123
  "id": "open-llm-leaderboard/official-providers",
backend/utils/analyze_prod_models.py CHANGED
@@ -35,7 +35,7 @@ def count_evaluated_models():
35
 
36
  # Get file list
37
  files = api.list_repo_files(
38
- f"{HF_ORGANIZATION}/llm-security-leaderboard-contents", repo_type="dataset"
39
  )
40
 
41
  # Get last commit info
 
35
 
36
  # Get file list
37
  files = api.list_repo_files(
38
+ f"{HF_ORGANIZATION}/results", repo_type="dataset"
39
  )
40
 
41
  # Get last commit info
backend/utils/last_activity.py CHANGED
@@ -72,7 +72,7 @@ def get_last_models(limit: int = 5) -> List[Dict]:
72
  logger.info("Getting commit history...")
73
  commits = list(
74
  api.list_repo_commits(
75
- repo_id=f"{HF_ORGANIZATION}/requests", repo_type="dataset"
76
  )
77
  )
78
  logger.info(f"Found {len(commits)} commits")
@@ -104,7 +104,7 @@ def get_last_models(limit: int = 5) -> List[Dict]:
104
  try:
105
  # Download and read the file
106
  content = api.hf_hub_download(
107
- repo_id=f"{HF_ORGANIZATION}/requests",
108
  filename=file,
109
  repo_type="dataset",
110
  )
 
72
  logger.info("Getting commit history...")
73
  commits = list(
74
  api.list_repo_commits(
75
+ repo_id=f"{HF_ORGANIZATION}/llm-security-leaderboard-requests", repo_type="dataset"
76
  )
77
  )
78
  logger.info(f"Found {len(commits)} commits")
 
104
  try:
105
  # Download and read the file
106
  content = api.hf_hub_download(
107
+ repo_id=f"{HF_ORGANIZATION}/llm-security-leaderboard-requests",
108
  filename=file,
109
  repo_type="dataset",
110
  )