xeon27 committed
Commit f066ed8 · Parent(s): b1f9063

Log df shape

Files changed (1): src/populate.py (+2 -1)
src/populate.py CHANGED
@@ -44,10 +44,11 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
 
     # # filter out if any of the benchmarks have not been produced
     # df = df[has_no_nan_values(df, benchmark_cols)]
+    df = df.fillna("-")
 
     # make values clickable and link to log files
     for col in benchmark_cols:
-        df[col] = df[[AutoEvalColumn.model.name, col]].apply(lambda x: f"[{x[col]}]({get_inspect_log_url(model_name=x[AutoEvalColumn.model.name].split('>')[1].split('<')[0], benchmark_name=TASK_NAME_INVERSE_MAP[col]['name'])})" if x[col] != np.nan else "-", axis=1)
+        df[col] = df[[AutoEvalColumn.model.name, col]].apply(lambda x: f"[{x[col]}]({get_inspect_log_url(model_name=x[AutoEvalColumn.model.name].split('>')[1].split('<')[0], benchmark_name=TASK_NAME_INVERSE_MAP[col]['name'])})" if x[col] != "-" else x[col], axis=1)
 
     # # make task names clickable and link to inspect-evals repository - this creates issues later
     # df = df.rename(columns={col: f"[{col}]({TASK_NAME_INVERSE_MAP[col]['source']})" for col in benchmark_cols})
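Note on the change itself: the old guard `x[col] != np.nan` is always True, because NaN never compares equal to anything (including itself), so the "-" fallback was never taken for missing scores. Filling NaN with a "-" sentinel first and then comparing against "-" makes the fallback reachable. Below is a minimal standalone sketch of that behaviour; the column names and the log URL are placeholders for illustration, not the Space's real data or its get_inspect_log_url helper.

import numpy as np
import pandas as pd

# NaN compares unequal to everything, including itself, so a guard like
# `x != np.nan` is True even when the value is missing.
print(np.nan != np.nan)  # True

# Hypothetical leaderboard slice: the second model is missing a score.
df = pd.DataFrame({"model": ["m1", "m2"], "task_a": [0.71, np.nan]})

# The committed approach: replace NaN with a "-" sentinel first, then only
# wrap real scores in a markdown link (placeholder URL used here).
df = df.fillna("-")
df["task_a"] = df["task_a"].apply(
    lambda v: v if v == "-" else f"[{v}](https://example.com/log)"
)

print(df)  # m1 gets a markdown link, m2 keeps the plain "-"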