Commit 026c7c2 · Parent(s): f2ce704

update

Files changed:
- README.md +2 -2
- config.json +2 -2
- generation_config.json +1 -1
- hf_quant_config.json +1 -1
- model-00001-of-00002.safetensors +2 -2
- model-00002-of-00002.safetensors +2 -2
- model.safetensors.index.json +1 -1
- tokenizer_config.json +2 -1
README.md
CHANGED
@@ -50,7 +50,7 @@ This model is not owned or developed by NVIDIA. This model has been developed an
 * Linux <br>
 
 ## Model Version(s):
-The model is quantized with nvidia-modelopt **v0.
+The model is quantized with nvidia-modelopt **v0.27.0** <br>
 
 ## Datasets:
 * Calibration Dataset: [cnn_dailymail](https://huggingface.co/datasets/abisee/cnn_dailymail) <br>
@@ -164,4 +164,4 @@ for output in outputs:
 
 ```
 
-This model can be deployed with an OpenAI Compatible Server via the vLLM backend. Instructions [here](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#openai-compatible-server).
+This model can be deployed with an OpenAI Compatible Server via the vLLM backend. Instructions [here](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#openai-compatible-server).
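The README's deployment note points at vLLM's OpenAI-compatible server. A minimal client-side sketch, assuming the server has already been started against this checkpoint (e.g. `vllm serve <path-to-this-repo> --port 8000`); the model id, port, and prompt below are illustrative, not taken from the commit:

```python
# Minimal sketch: query a vLLM OpenAI-compatible server hosting this checkpoint.
# Assumes the server was launched separately (e.g. `vllm serve <path-to-this-repo> --port 8000`).
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="<path-to-this-repo>",  # illustrative: whatever name the server registered
    messages=[{"role": "user", "content": "Summarize what FP8 quantization changes."}],
    temperature=0.6,  # matches generation_config.json below
    top_p=0.9,
)
print(response.choices[0].message.content)
```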
config.json
CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "/opt/zhiyu/ckpts/Meta-Llama-3.1-8B-Instruct/",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -11,6 +10,7 @@
     128008,
     128009
   ],
+  "head_dim": 128,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
@@ -33,7 +33,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.50.0.dev0",
   "use_cache": true,
   "vocab_size": 128256
 }
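Besides pinning `transformers_version`, the commit drops the local `_name_or_path` and makes `head_dim` explicit. A quick sanity check, assuming the rest of config.json carries the usual Llama-3.1-8B values (`num_attention_heads` = 32 is not part of the hunk shown above):

```python
# Sketch: confirm the newly explicit head_dim agrees with hidden_size / num_attention_heads.
# num_attention_heads = 32 is assumed from the standard Llama-3.1-8B config (not in the hunk above).
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # illustrative: run from the repo root
assert config.head_dim == config.hidden_size // config.num_attention_heads  # 4096 // 32 == 128
print("head_dim:", config.head_dim)
```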
generation_config.json
CHANGED
@@ -8,5 +8,5 @@
   ],
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.
+  "transformers_version": "4.50.0.dev0"
 }
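Only the `transformers_version` field changes here, but the surrounding defaults (temperature 0.6, top_p 0.9) are the ones to mirror when sampling outside of `generate()`; a short sketch with vLLM's offline API, model path illustrative:

```python
# Sketch: reuse the repo's generation defaults with vLLM's offline API.
from vllm import LLM, SamplingParams

llm = LLM(model="<path-to-this-repo>")  # illustrative path
params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=128)  # from generation_config.json
outputs = llm.generate(["Explain FP8 KV-cache scaling in one sentence."], params)
print(outputs[0].outputs[0].text)
```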
hf_quant_config.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "producer": {
     "name": "modelopt",
-    "version": "0.
+    "version": "0.27.0"
   },
   "quantization": {
     "quant_algo": "FP8",
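The producer block now records nvidia-modelopt 0.27.0, with FP8 as the quantization algorithm. A hedged sketch of how such a checkpoint is typically produced with ModelOpt's post-training quantization API; the calibration loop below is illustrative (the README names cnn_dailymail as the real calibration set), and the export directory is a placeholder:

```python
# Sketch (not the exact recipe behind this commit): FP8 PTQ with NVIDIA ModelOpt,
# whose HF export writes hf_quant_config.json alongside the safetensors shards.
import torch
import modelopt.torch.quantization as mtq
from modelopt.torch.export import export_hf_checkpoint
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-3.1-8B-Instruct"  # base model, per the README
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda")
tokenizer = AutoTokenizer.from_pretrained(model_id)

def forward_loop(m):
    # Illustrative calibration pass; the actual recipe calibrates on cnn_dailymail samples.
    for text in ["The quick brown fox jumps over the lazy dog."] * 8:
        ids = tokenizer(text, return_tensors="pt").input_ids.to(m.device)
        m(ids)

mtq.quantize(model, mtq.FP8_DEFAULT_CFG, forward_loop)  # FP8 post-training quantization
export_hf_checkpoint(model, export_dir="llama-3.1-8b-instruct-fp8")  # placeholder output dir
```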
model-00001-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
+oid sha256:499ce93537438acbb81ac5bf6f6890d0501b572d85405866705b700f08644a24
+size 4997857256
model-00002-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
+oid sha256:62e7244fe8487c09413cd9ed683d94d580dca68a4562549f7b62c8f8c0ba5246
+size 4083429784
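The shards are stored as Git LFS pointers; this commit updates their sha256 oids and byte sizes. Those values can be checked against the downloaded files with the standard library alone (paths assume the shards sit in the current directory):

```python
# Sketch: verify the downloaded shards against the LFS pointer oid (sha256) and size.
import hashlib, os

expected = {
    "model-00001-of-00002.safetensors": ("499ce93537438acbb81ac5bf6f6890d0501b572d85405866705b700f08644a24", 4997857256),
    "model-00002-of-00002.safetensors": ("62e7244fe8487c09413cd9ed683d94d580dca68a4562549f7b62c8f8c0ba5246", 4083429784),
}

for name, (oid, size) in expected.items():
    digest = hashlib.sha256()
    with open(name, "rb") as f:  # assumes the shards are in the current directory
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    assert os.path.getsize(name) == size and digest.hexdigest() == oid, f"mismatch for {name}"
    print(name, "OK")
```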
model.safetensors.index.json
CHANGED
@@ -267,7 +267,7 @@
     "model.layers.18.mlp.up_proj.weight_scale": "model-00002-of-00002.safetensors",
     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.18.self_attn.k_proj.input_scale": "model-00001-of-00002.safetensors",
-    "model.layers.18.self_attn.k_proj.k_scale": "model-
+    "model.layers.18.self_attn.k_proj.k_scale": "model-00002-of-00002.safetensors",
     "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.18.self_attn.k_proj.weight_scale": "model-00001-of-00002.safetensors",
     "model.layers.18.self_attn.o_proj.input_scale": "model-00002-of-00002.safetensors",
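The index maps each tensor name to the shard that holds it; after this commit the layer-18 `k_proj.k_scale` entry points at the second shard. A short sketch that resolves a tensor through the index and reads it with safetensors (paths assume the repo files are in the current directory):

```python
# Sketch: resolve a tensor's shard via model.safetensors.index.json, then read it.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.layers.18.self_attn.k_proj.k_scale"
shard = weight_map[name]  # "model-00002-of-00002.safetensors" after this commit
with safe_open(shard, framework="pt") as tensors:
    print(name, tensors.get_tensor(name).shape)
```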
tokenizer_config.json
CHANGED
@@ -2053,11 +2053,12 @@
   "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|eot_id|>",
+  "extra_special_tokens": {},
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
   "model_max_length": 131072,
   "pad_token": "<|eot_id|>",
-  "tokenizer_class": "
+  "tokenizer_class": "PreTrainedTokenizer"
 }
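The chat template above wraps each turn in `<|start_header_id|> … <|eot_id|>` markers; `apply_chat_template` reproduces that format at inference time. A minimal sketch (repo path illustrative):

```python
# Sketch: render the Llama-3.1 chat template defined in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # illustrative: run from the repo root
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What does FP8 quantization change?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)  # bos + <|start_header_id|>user<|end_header_id|> ... <|start_header_id|>assistant<|end_header_id|>
```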