categories fix
Browse files
- app.py +4 -0
- src/populate.py +4 -3
app.py
CHANGED
@@ -191,6 +191,10 @@ def init_leaderboard(dataframe, visible_columns=None):
     visible_columns = ['model_name'] + visible_columns
     display_df = dataframe[visible_columns].copy()
 
+    print(f"--- DataFrame inside init_leaderboard (before rounding) ---")
+    print(display_df[['model_name', 'macro_accuracy', 'macro_recall', 'total_evals_count']].head() if all(c in display_df.columns for c in ['model_name', 'macro_accuracy', 'macro_recall', 'total_evals_count']) else "Relevant columns not present")
+    print(f"-------------------------------------------------------------")
+
     # Round numeric columns to 3 decimal places for display
     numeric_cols = display_df.select_dtypes(include=np.number).columns
     for col in numeric_cols:
src/populate.py
CHANGED
@@ -8,6 +8,7 @@ import pandas as pd
 import tempfile
 from typing import Dict, List, Optional
 from datetime import datetime
+import numpy as np
 
 from huggingface_hub import hf_hub_download, HfApi
 from datasets import load_dataset
@@ -201,17 +202,17 @@ def get_category_leaderboard_df(category: str, version="v0") -> pd.DataFrame:
     if accuracy_values:
         filtered_entry["macro_accuracy"] = sum(accuracy_values) / len(accuracy_values)
     else:
-        filtered_entry["macro_accuracy"] =
+        filtered_entry["macro_accuracy"] = np.nan
 
     if category_recall_values:
         filtered_entry["macro_recall"] = sum(category_recall_values) / len(category_recall_values)
     else:
-        filtered_entry["macro_recall"] =
+        filtered_entry["macro_recall"] = np.nan
 
     if total_samples > 0:
         filtered_entry["total_evals_count"] = total_samples
     else:
-        filtered_entry["total_evals_count"] =
+        filtered_entry["total_evals_count"] = np.nan
 
     filtered_entries.append(filtered_entry)