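"""Submission handling for the evaluation queue.

Validates a model submitted through the leaderboard UI, writes the request to
a local JSON file, and uploads it to the evaluation queue dataset on the Hub.
"""
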
import json
from datetime import datetime, timezone

import torch

from src.display.formatting import styled_error, styled_message, styled_warning
from src.display.utils import EvalQueuedModel, LLMJpEvalVersion, VllmVersion
from src.envs import API, EVAL_REQUESTS_PATH, HF_TOKEN, QUEUE_REPO
from src.submission.check_validity import already_submitted_models, check_model_card, is_model_on_hub

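# Cache of requests already in the queue, populated lazily on the first
# submission from the files under EVAL_REQUESTS_PATH.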
REQUESTED_MODELS: set[EvalQueuedModel] = set()

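# Evaluation-harness and runtime versions recorded with each request, so a
# queued run is tied to the exact evaluation environment.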
LLM_JP_EVAL_VERSION = LLMJpEvalVersion.current.value.name
VLLM_VERSION = VllmVersion.current.value.name


def add_new_eval(
    model_id: str,
    revision: str,
    precision: str,
    model_type: str,
    add_special_tokens: str,
):
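    """Validate a submission and add it to the evaluation queue.

    Args:
        model_id: Hub repository id, e.g. "org-name/model-name".
        revision: Git revision to evaluate; falls back to "main" when empty.
        precision: "float16", "bfloat16", "float32", or "auto" to infer the
            dtype from the model config.
        model_type: Model type selected in the UI; must be non-empty.
        add_special_tokens: Tokenizer add_special_tokens setting, passed
            through as a string.

    Returns:
        A styled status message (error, warning, or success) to display in
        the UI.
    """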
    global REQUESTED_MODELS
    if not REQUESTED_MODELS:
        REQUESTED_MODELS = already_submitted_models(EVAL_REQUESTS_PATH)

    revision = revision or "main"

    # Is the model on the hub?
    model_on_hub, error, config = is_model_on_hub(
        model_name=model_id, revision=revision, token=HF_TOKEN, test_tokenizer=True
    )
    if not model_on_hub:
        return styled_error(f'Model "{model_id}" {error}')
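    # Resolve "auto" precision from the torch_dtype declared in the model config.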
    if precision == "auto":
        dtype = getattr(config, "torch_dtype", None)
        if dtype == torch.float16:
            precision = "float16"
        elif dtype == torch.bfloat16:
            precision = "bfloat16"
        elif dtype == torch.float32:
            precision = "float32"
        else:
            return styled_error(
                "Unable to retrieve a valid dtype from config.json. Please select one of float16/bfloat16/float32 and resubmit."
            )

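    # These fields together identify a unique evaluation run; they are used
    # below to reject duplicate submissions.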
    model_data = EvalQueuedModel(
        model=model_id,
        revision=revision,
        precision=precision,
        add_special_tokens=add_special_tokens,
        llm_jp_eval_version=LLM_JP_EVAL_VERSION,
        vllm_version=VLLM_VERSION,
    )

    if model_data in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted with the same configuration.")

    if "/" in model_id:
        user_or_org, model_name = model_id.split("/", 1)
    else:
        user_or_org, model_name = "", model_id

    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # Is the model info correctly filled?
    try:
        model_info = API.model_info(repo_id=model_id, revision=revision)
    except Exception:
        return styled_error("Could not retrieve your model information. Please make sure it is filled out correctly.")

    # Were the model card and license filled?
    try:
        _ = model_info.cardData["license"]
    except Exception:
        return styled_error("Please select a license for your model.")

    modelcard_OK, error_msg = check_model_card(model_id)
    if not modelcard_OK:
        return styled_error(error_msg)

    # Seems good, creating the eval
    print("Adding new eval")

    eval_entry = {
        "model_type": model_type,
        "model": model_id,
        "precision": precision,
        "revision": revision,
        "add_special_tokens": add_special_tokens,
        "llm_jp_eval_version": LLM_JP_EVAL_VERSION,
        "vllm_version": VLLM_VERSION,
        "status": "PENDING",
        "submitted_time": current_time,
    }

    print("Creating eval file")
    OUT_DIR = EVAL_REQUESTS_PATH / user_or_org
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    out_file_name = f"{model_name}_{current_time.replace(':', '-')}.json"
    out_path = OUT_DIR / out_file_name

    with out_path.open("w") as f:
        json.dump(eval_entry, f)

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.relative_to(EVAL_REQUESTS_PATH).as_posix(),
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model_id} to eval queue",
    )
    REQUESTED_MODELS.add(model_data)

    # Remove the local file
    out_path.unlink()

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
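

# Minimal usage sketch (hypothetical values; in the live app these arguments
# come from the submission form):
#
#   message = add_new_eval(
#       model_id="example-org/example-model",  # hypothetical repo id
#       revision="main",
#       precision="auto",
#       model_type="pretrained",  # hypothetical; pick a type offered by the UI
#       add_special_tokens="false",  # hypothetical; pick a value offered by the UI
#   )
#   print(message)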