Update src/populate.py
src/populate.py  CHANGED  (+9, -5)
@@ -9,7 +9,6 @@ from src.leaderboard.read_evals import get_raw_eval_results
 
 
 def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
-    """Creates a dataframe from all the individual experiment results"""
     raw_data = get_raw_eval_results(results_path, requests_path)
     all_data_json = [v.to_dict() for v in raw_data]
 
@@ -17,13 +16,17 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
     df = df[cols].round(decimals=2)
 
-    # filter out if any of the benchmarks have not been produced
+    # Apply clickable links to the model name column
+    if "model" in df.columns:
+        df["model"] = df["model"].apply(make_clickable_model)
+
+    # Filter out rows for which not all benchmarks were produced
     df = df[has_no_nan_values(df, benchmark_cols)]
     return df
 
 
 def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
-    """Creates the different dataframes for the evaluation queues requestes"""
+    """Creates the DataFrames for the evaluation queue."""
     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
     all_evals = []
 
@@ -38,8 +41,9 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
 
             all_evals.append(data)
         elif ".md" not in entry:
-            # this is a folder
-            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
+            # This is a folder: use the full path when checking whether an entry is a file
+            sub_entries = [e for e in os.listdir(os.path.join(save_path, entry))
+                           if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")]
             for sub_entry in sub_entries:
                 file_path = os.path.join(save_path, entry, sub_entry)
                 with open(file_path) as fp:
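The new clickable-link step relies on a make_clickable_model helper imported elsewhere in src/populate.py; its body is not part of this diff. A minimal sketch of what such a helper typically looks like in Hugging Face leaderboard spaces, where the Hub URL scheme and the sample row are illustrative assumptions rather than code from this commit:

import pandas as pd

def make_clickable_model(model_name: str) -> str:
    # Assumed behavior: render the model name as an HTML link to its Hub page.
    link = f"https://huggingface.co/{model_name}"
    return f'<a target="_blank" href="{link}">{model_name}</a>'

# Illustrative one-row frame; the guard mirrors the `if "model" in df.columns` check above.
df = pd.DataFrame({"model": ["org/model-7b"]})
if "model" in df.columns:
    df["model"] = df["model"].apply(make_clickable_model)
print(df.loc[0, "model"])
# <a target="_blank" href="https://huggingface.co/org/model-7b">org/model-7b</a>

The column guard presumably keeps get_leaderboard_df from raising a KeyError on leaderboards configured without a plain "model" column.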
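has_no_nan_values is likewise defined outside this diff. Assuming it matches the stock leaderboard template, it reduces each row to a single boolean, which is what makes the df[...] filtering line work; a self-contained sketch in which the helper body and sample data are assumptions:

import pandas as pd

def has_no_nan_values(df: pd.DataFrame, columns: list) -> pd.Series:
    # True for rows where every listed benchmark column has a value.
    return df[columns].notna().all(axis=1)

benchmark_cols = ["bench_a", "bench_b"]
df = pd.DataFrame({"bench_a": [0.51, None], "bench_b": [0.73, 0.90]})
df = df[has_no_nan_values(df, benchmark_cols)]
print(df)  # only row 0 survives; row 1 is dropped for its missing bench_a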
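On the queue side, the substantive fix is joining the parent directory before calling os.path.isfile. Bare names are resolved against the process's current working directory, so the old os.path.isfile(e) check on names returned by os.listdir was virtually always False, and request files inside subfolders were silently skipped. A runnable sketch of the difference, using a throwaway directory layout invented purely for illustration:

import os
import tempfile

# Hypothetical layout: <root>/org contains one request file and one nested folder.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "org", "nested"))
with open(os.path.join(root, "org", "request.json"), "w") as fp:
    fp.write("{}")

entry = "org"
names = os.listdir(os.path.join(root, entry))

# Old check: bare names are resolved against the CWD, so real files are missed.
old = [e for e in names if os.path.isfile(e) and not e.startswith(".")]
# New check: join the parent directory first, as the patched line does.
new = [e for e in names if os.path.isfile(os.path.join(root, entry, e)) and not e.startswith(".")]

print(old)  # [] -- "request.json" does not exist relative to the CWD
print(new)  # ['request.json'] -- and the nested folder is correctly excluded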