from dataclasses import dataclass, make_dataclass
from enum import Enum


from src.about import TasksRGB, TasksPGB, TasksGUE, TasksGB


def fields(raw_class):
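    """Return the ColumnContent values declared on raw_class, skipping dunder attributes."""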
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# These classes hold the user-facing column names, so that a rename only has
# to happen here rather than everywhere the columns are used in the code.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False


## Leaderboard columns
auto_eval_columns = []
for eval_col in [TasksRGB, TasksPGB, TasksGUE, TasksGB]:
    auto_eval_column_dict = []
    # Identity columns, always shown
    auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
    auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
    # Scores
    auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Rank", "number", True)])
    for task in eval_col:
        auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
    # Model information
    auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
    auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
    auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
    auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
    auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
    auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
    auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
    auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
    auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
    auto_eval_columns.append(auto_eval_column_dict)

# We use make_dataclass to build one column class per benchmark, filling in
# the score columns dynamically from the corresponding Tasks enum
AutoEvalColumnRGB = make_dataclass("AutoEvalColumnRGB", auto_eval_columns[0], frozen=True)
AutoEvalColumnPGB = make_dataclass("AutoEvalColumnPGB", auto_eval_columns[1], frozen=True)
AutoEvalColumnGUE = make_dataclass("AutoEvalColumnGUE", auto_eval_columns[2], frozen=True)
AutoEvalColumnGB = make_dataclass("AutoEvalColumnGB", auto_eval_columns[3], frozen=True)
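
# Illustrative (a sketch, not executed here): each generated class exposes one
# attribute per column whose default value is its ColumnContent, e.g.
#   AutoEvalColumnRGB.model.name   -> "Model"
#   AutoEvalColumnRGB.average.name -> "Rank"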


## For the queue columns in the submission tab
@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)
    revision = ColumnContent("revision", "str", True)
    private = ColumnContent("private", "bool", True)
    precision = ColumnContent("precision", "str", True)
    weight_type = ColumnContent("weight_type", "str", True)
    status = ColumnContent("status", "str", True)


## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji


class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    FT = ModelDetails(name="fine-tuned", symbol="🔶")
    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
    RL = ModelDetails(name="RL-tuned", symbol="🟦")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(model_type: str):
        if "fine-tuned" in model_type or "🔶" in model_type:
            return ModelType.FT
        if "pretrained" in model_type or "🟢" in model_type:
            return ModelType.PT
        if "RL-tuned" in model_type or "🟦" in model_type:
            return ModelType.RL
        if "instruction-tuned" in model_type or "⭕" in model_type:
            return ModelType.IFT
        return ModelType.Unknown
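
# Illustrative round-trip (a sketch):
#   ModelType.FT.to_str()               -> "🔶 fine-tuned"
#   ModelType.from_str("🔶 fine-tuned") -> ModelType.FT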


class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")


class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(precision: str):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        return Precision.Unknown
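
# Illustrative: both torch dtype strings and bare names map to the same member,
# e.g. Precision.from_str("torch.bfloat16") -> Precision.bfloat16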


# Column selection
RGB_COLS = [c.name for c in fields(AutoEvalColumnRGB) if not c.hidden]
PGB_COLS = [c.name for c in fields(AutoEvalColumnPGB) if not c.hidden]
GUE_COLS = [c.name for c in fields(AutoEvalColumnGUE) if not c.hidden]
GB_COLS = [c.name for c in fields(AutoEvalColumnGB) if not c.hidden]

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

RGB_BENCHMARK_COLS = [t.value.col_name for t in TasksRGB]
PGB_BENCHMARK_COLS = [t.value.col_name for t in TasksPGB]
GUE_BENCHMARK_COLS = [t.value.col_name for t in TasksGUE]
GB_BENCHMARK_COLS = [t.value.col_name for t in TasksGB]
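

if __name__ == "__main__":
    # Minimal sanity-check sketch (illustrative; assumes the package imports
    # resolve, e.g. run from the repo root with `python -m ...`; the exact
    # module path is an assumption, not part of this file).
    print("RGB columns:", RGB_COLS)
    print("Queue columns:", EVAL_COLS)
    print(ModelType.from_str("pretrained").to_str())  # -> "🟢 pretrained"
    print(Precision.from_str("float16").value.name)   # -> "float16"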