Delete training_params.json
training_params.json  DELETED  (+0 -49)
@@ -1,49 +0,0 @@
-{
-    "model": "Team-ACE/ToolACE-2-Llama-3.1-8B",
-    "project_name": "llama-3-8b-ft",
-    "data_path": "llama-3-8b-ft/autotrain-data",
-    "train_split": "train",
-    "valid_split": null,
-    "add_eos_token": true,
-    "block_size": -1,
-    "model_max_length": 2048,
-    "padding": "right",
-    "trainer": "sft",
-    "use_flash_attention_2": false,
-    "log": "none",
-    "disable_gradient_checkpointing": false,
-    "logging_steps": 5,
-    "eval_strategy": "epoch",
-    "save_total_limit": 3,
-    "auto_find_batch_size": false,
-    "mixed_precision": null,
-    "lr": 5e-06,
-    "epochs": 1,
-    "batch_size": 1,
-    "warmup_ratio": 0.1,
-    "gradient_accumulation": 1,
-    "optimizer": "adamw_torch",
-    "scheduler": "cosine_with_restarts",
-    "weight_decay": 0.0001,
-    "max_grad_norm": 0.3,
-    "seed": 42,
-    "chat_template": null,
-    "quantization": "int8",
-    "target_modules": "q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
-    "merge_adapter": true,
-    "peft": true,
-    "lora_r": 128,
-    "lora_alpha": 256,
-    "lora_dropout": 0.05,
-    "model_ref": null,
-    "dpo_beta": 0.1,
-    "max_prompt_length": 128,
-    "max_completion_length": null,
-    "prompt_text_column": "autotrain_prompt",
-    "text_column": "autotrain_text",
-    "rejected_text_column": "autotrain_rejected_text",
-    "push_to_hub": true,
-    "username": "neural-coder",
-    "unsloth": false,
-    "distributed_backend": null
-}
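For context, the deleted file configured an AutoTrain SFT run on Llama-3.1-8B with int8 quantization and LoRA adapters (r=128, alpha=256) over the attention and MLP projection layers. As a minimal sketch (not part of this repo), assuming the peft library, a local copy of the deleted file, and a CAUSAL_LM task type, the LoRA-related fields would map onto a LoraConfig roughly like this:

```python
import json

from peft import LoraConfig

# Hypothetical local copy of the deleted config
with open("training_params.json") as f:
    params = json.load(f)

lora_config = LoraConfig(
    r=params["lora_r"],                  # 128
    lora_alpha=params["lora_alpha"],     # 256
    lora_dropout=params["lora_dropout"], # 0.05
    # "q_proj,k_proj,...,down_proj" -> list of module names
    target_modules=params["target_modules"].split(","),
    task_type="CAUSAL_LM",               # assumption: SFT of a causal LM
)
print(lora_config)
```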