jacob-danner committed (verified)
Commit 110bcd9 · Parent: 30f5300

Training in progress, epoch 1

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ library_name: peft
+ license: gemma
+ base_model: google/gemma-3-1b-pt
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: gpt_1_causual_finetune
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gpt_1_causual_finetune
+
+ This model is a fine-tuned version of [google/gemma-3-1b-pt](https://huggingface.co/google/gemma-3-1b-pt) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9766
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.00025
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - optimizer: AdamW (torch) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 15
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 3.2323        | 1.0   | 12   | 2.9389          |
+ | 2.6433        | 2.0   | 24   | 2.2669          |
+ | 1.9319        | 3.0   | 36   | 1.5997          |
+ | 1.4135        | 4.0   | 48   | 1.2555          |
+ | 1.193         | 5.0   | 60   | 1.1551          |
+ | 1.1195        | 6.0   | 72   | 1.1047          |
+ | 1.0782        | 7.0   | 84   | 1.0753          |
+ | 1.0507        | 8.0   | 96   | 1.0515          |
+ | 1.0263        | 9.0   | 108  | 1.0319          |
+ | 1.0072        | 10.0  | 120  | 1.0154          |
+ | 0.9911        | 11.0  | 132  | 1.0053          |
+ | 0.9781        | 12.0  | 144  | 0.9918          |
+ | 0.9675        | 13.0  | 156  | 0.9833          |
+ | 0.9597        | 14.0  | 168  | 0.9789          |
+ | 0.9552        | 15.0  | 180  | 0.9766          |
+
+
+ ### Framework versions
+
+ - PEFT 0.15.2.dev0
+ - Transformers 4.51.0.dev0
+ - Pytorch 2.6.0
+ - Datasets 3.4.0
+ - Tokenizers 0.21.1
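Because this commit ships only a LoRA adapter (adapter_model.safetensors is about 3 MB), inference requires loading the google/gemma-3-1b-pt base model and attaching the adapter via PEFT. A minimal sketch follows; the adapter repo id is an assumption inferred from the committer and model name, not something stated in the commit.

```python
# Minimal inference sketch. The base model id comes from the card; the
# adapter repo id is an assumption, so substitute the actual Hub id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "google/gemma-3-1b-pt"
adapter_id = "jacob-danner/gpt_1_causual_finetune"  # hypothetical repo id

# The tokenizer files (tokenizer.json, special_tokens_map.json, ...) ship
# with this commit, so the adapter repo can supply the tokenizer too.
tokenizer = AutoTokenizer.from_pretrained(adapter_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()

inputs = tokenizer("Once upon a time", return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

If a standalone checkpoint is preferred, `model.merge_and_unload()` folds the LoRA weights back into the base model.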
adapter_config.json CHANGED
@@ -24,8 +24,8 @@
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "q_proj",
-     "v_proj"
+     "v_proj",
+     "q_proj"
    ],
    "task_type": "CAUSAL_LM",
    "trainable_token_indices": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f76b4cb12d916cefa9130a39af60a021ec9a9c4e6410e2bbc041d3bbad2b77c
+ oid sha256:9515d0f5031920eb8de18c313f99244a947cf77337262455e1a0b587deed70b9
  size 2995512
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
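special_tokens_map.json records the control tokens (plus Gemma 3's image delimiter tokens) together with their tokenization flags. A quick sketch for sanity-checking that they round-trip through the loaded tokenizer, using the same assumed repo id as in the earlier sketch:

```python
# Sanity-check that the special tokens defined in special_tokens_map.json
# are what the loaded tokenizer reports. Repo id is an assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("jacob-danner/gpt_1_causual_finetune")

print(tokenizer.special_tokens_map)                 # expect <bos>, <eos>, <pad>, <unk>
print(tokenizer.bos_token, tokenizer.bos_token_id)
print(tokenizer.pad_token, tokenizer.pad_token_id)
```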
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
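What is committed here is not the tokenizer itself but a Git LFS pointer: the real ~33 MB tokenizer.json lives in LFS storage, and the oid line is the SHA-256 of its contents. A small sketch for verifying a downloaded copy against the pointer; the local path is a placeholder.

```python
# Verify a downloaded tokenizer.json against the SHA-256 recorded in the
# LFS pointer committed here. The local path is a placeholder.
import hashlib

EXPECTED_OID = "4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795"

digest = hashlib.sha256()
with open("tokenizer.json", "rb") as f:               # placeholder path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream 1 MiB at a time
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "file does not match the LFS pointer"
print("tokenizer.json matches the pointer oid")
```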
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4d656bdb3cb6e6c27365fde64efcbea4dd20faf64355be88ac4c60e906aea7fa
- size 5432
+ oid sha256:17096d50b7b262cc6ce5b9546e6bed94fc28e6da2b40cd25abf56577f2160707
+ size 5368
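training_args.bin is a pickled TrainingArguments object (note the size change: the new trainer configuration serializes to 5368 bytes instead of 5432). It can be unpickled locally to cross-check the hyperparameters listed in the model card; a sketch assuming a trusted local copy and a compatible transformers version:

```python
# Cross-check the model card by unpickling TrainingArguments. This is an
# arbitrary pickle, so pass weights_only=False only for files you trust,
# and use a transformers version compatible with the one that wrote it.
import torch

args = torch.load("training_args.bin", weights_only=False)  # placeholder local path
print(args.learning_rate)                # expect 0.00025
print(args.per_device_train_batch_size)  # expect 16
print(args.gradient_accumulation_steps)  # expect 4
print(args.num_train_epochs)             # expect 15
```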