yujiepan committed
Commit cdd392d · verified · 1 Parent(s): d3736ad

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,128 @@
---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
---

This tiny model is for debugging purposes only. It is randomly initialized, using a config adapted from [meta-llama/Llama-4-Maverick-17B-128E-Instruct](https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E-Instruct).

### Example usage

```python
import torch

from transformers import AutoProcessor, Llama4ForConditionalGeneration

model_id = "yujiepan/llama-4-tiny-random"
processor = AutoProcessor.from_pretrained(model_id)
model = Llama4ForConditionalGeneration.from_pretrained(
    model_id,
    attn_implementation="sdpa",  # flex attention / flash_attention_2 do not work, debugging...
    device_map="auto",
    torch_dtype=torch.bfloat16,
)

url1 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
url2 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png"
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": url1},
            {"type": "image", "url": url2},
            {"type": "text", "text": "Can you describe how these two images are similar, and how they differ?"},
        ]
    },
]

inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(
    **inputs,
    max_new_tokens=32,
)

response = processor.batch_decode(outputs[:, inputs["input_ids"].shape[-1]:])[0]
print(response)
print(outputs[0])
```
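
The same pipeline also works for text-only prompts. Below is a minimal sketch (not part of the original card) that reuses the `model` and `processor` objects loaded above; the chat template expects message content as a list of typed parts:

```python
# Minimal text-only sketch; assumes `model` and `processor` from the snippet above.
text_messages = [
    {"role": "user", "content": [{"type": "text", "text": "Hello!"}]},
]
text_inputs = processor.apply_chat_template(
    text_messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

text_outputs = model.generate(**text_inputs, max_new_tokens=16)
print(processor.batch_decode(text_outputs[:, text_inputs["input_ids"].shape[-1]:])[0])
```

Since the weights are random, the generated text is meaningless; the point is only to exercise the code path.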

### Code used to create this repo

```python
import json

import torch

from huggingface_hub import hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    GenerationConfig,
    Llama4ForConditionalGeneration,
    pipeline,
    set_seed,
)

source_model_id = "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
save_folder = "/tmp/yujiepan/llama-4-tiny-random"

processor = AutoProcessor.from_pretrained(source_model_id)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r') as f:
    config_json = json.load(f)
config_json["text_config"]["num_hidden_layers"] = 4  # enough layers to trigger both the no-rope and the MoE branches
config_json["text_config"]["hidden_size"] = 32
config_json["text_config"]["head_dim"] = 32  # vLLM requires head_dim >= 32
config_json["text_config"]["num_attention_heads"] = 1
config_json["text_config"]["num_key_value_heads"] = 1
config_json["text_config"]["use_qk_norm"] = True
config_json["text_config"]["intermediate_size"] = 64
config_json["text_config"]["intermediate_size_mlp"] = 128
config_json["text_config"]["num_local_experts"] = 8
config_json["text_config"]["tie_word_embeddings"] = True

config_json["vision_config"]["num_hidden_layers"] = 2
config_json["vision_config"]["hidden_size"] = 32
config_json["vision_config"]["intermediate_size"] = 128
assert config_json["vision_config"]["intermediate_size"] == int(
    config_json["vision_config"]["hidden_size"] // config_json["vision_config"]["pixel_shuffle_ratio"] ** 2
)
config_json["vision_config"]["num_attention_heads"] = 1
config_json["vision_config"]["projector_input_dim"] = 32
config_json["vision_config"]["projector_output_dim"] = 32
config_json["vision_config"]["vision_output_dim"] = 32
with open(f"{save_folder}/config.json", "w") as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = Llama4ForConditionalGeneration(config)
torch.set_default_dtype(torch.float32)
model.generation_config = GenerationConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.5)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
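
After saving, a quick sanity check (a hypothetical snippet, not part of the upload script) is to reload the folder and confirm the model really is tiny:

```python
# Hypothetical check: reload from `save_folder` and count parameters.
reloaded = Llama4ForConditionalGeneration.from_pretrained(save_folder, torch_dtype=torch.bfloat16)
num_params = sum(p.numel() for p in reloaded.parameters())
print(f"total parameters: {num_params:,}")  # roughly a few million for this config
```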
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %} \n {%- if messages[0]['content'] is string %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- else %}\n {#- FIXME: The processor requires an array, always. #}\n {%- set system_message = messages[0]['content'][0]['text']|trim %}\n {%- endif %}\n {%- set messages = messages[1:] %}\n {%- set user_supplied_system_message = true %}\n{%- else %}\n {%- set system_message = \"\" %}\n {%- set user_supplied_system_message = false %}\n{%- endif %}\n\n{#- System message if the user supplied one #}\n{%- if user_supplied_system_message %}\n {{- \"<|header_start|>system<|header_end|>\\n\\n\" }}\n {%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n {%- endif %}\n {%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- \"<|eot|>\" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|header_start|>user<|header_end|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|header_start|>' + message['role'] + '<|header_end|>\\n\\n' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- elif 'tool_calls' in message and message.tool_calls|length > 0 %}\n {{- '<|header_start|>assistant<|header_end|>\\n\\n' -}}\n {{- '<|python_start|>' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|python_end|>' }}\n {%- for tool_call in message.tool_calls %}\n {{- '{\"name\": \"' + tool_call.function.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.function.arguments | tojson }}\n {{- \"}\" }}\n {%- endfor %}\n {{- \"<|eot|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|header_start|>ipython<|header_end|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|header_start|>assistant<|header_end|>\\n\\n' }}\n{%- endif %}\n"
+ }
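
To inspect what this template actually renders, here is a minimal sketch (assuming the processor from this repo) that formats a one-turn conversation without tokenizing:

```python
from transformers import AutoProcessor

# Sketch: render the chat template to a plain prompt string.
processor = AutoProcessor.from_pretrained("yujiepan/llama-4-tiny-random")
messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # expect <|header_start|>user<|header_end|> ... <|eot|> framing plus the assistant header
```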
config.json ADDED
@@ -0,0 +1,88 @@
{
  "architectures": [
    "Llama4ForConditionalGeneration"
  ],
  "boi_token_index": 200080,
  "eoi_token_index": 200081,
  "image_token_index": 200092,
  "model_type": "llama4",
  "text_config": {
    "_attn_implementation_autoset": true,
    "attention_bias": false,
    "attention_chunk_size": 8192,
    "attention_dropout": 0.0,
    "attn_scale": 0.1,
    "attn_temperature_tuning": 4,
    "bos_token_id": 200000,
    "eos_token_id": [
      200001,
      200007,
      200008
    ],
    "floor_scale": 8192,
    "for_llm_compressor": false,
    "head_dim": 32,
    "hidden_act": "silu",
    "hidden_size": 32,
    "initializer_range": 0.02,
    "interleave_moe_layer_step": 2,
    "intermediate_size": 64,
    "intermediate_size_mlp": 128,
    "max_position_embeddings": 1048576,
    "model_type": "llama4_text",
    "moe_layers": [
      1,
      3
    ],
    "no_rope_layers": [
      1,
      1,
      1,
      0
    ],
    "num_attention_heads": 1,
    "num_experts_per_tok": 1,
    "num_hidden_layers": 4,
    "num_key_value_heads": 1,
    "num_local_experts": 8,
    "output_router_logits": false,
    "pad_token_id": 200018,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "rope_theta": 500000.0,
    "router_aux_loss_coef": 0.001,
    "router_jitter_noise": 0.0,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "use_qk_norm": true,
    "vocab_size": 202048
  },
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.0",
  "vision_config": {
    "_attn_implementation_autoset": true,
    "attention_dropout": 0.0,
    "hidden_act": "gelu",
    "hidden_size": 32,
    "image_size": 336,
    "initializer_range": 0.02,
    "intermediate_size": 128,
    "model_type": "llama4_vision_model",
    "multi_modal_projector_bias": false,
    "norm_eps": 1e-05,
    "num_attention_heads": 1,
    "num_channels": 3,
    "num_hidden_layers": 2,
    "patch_size": 14,
    "pixel_shuffle_ratio": 0.5,
    "projector_dropout": 0.0,
    "projector_input_dim": 32,
    "projector_output_dim": 32,
    "rope_theta": 10000,
    "vision_feature_layer": -1,
    "vision_feature_select_strategy": "default",
    "vision_output_dim": 32
  }
}
generation_config.json ADDED
@@ -0,0 +1,14 @@
{
  "bos_token_id": 200000,
  "do_sample": true,
  "eos_token_id": [
    200001,
    200007,
    200008
  ],
  "pad_token_id": 200018,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.51.0",
  "trust_remote_code": true
}
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dba211539db36f030df1135f8052cafcd5598cf4bfdfca30bcdab171df3249da
size 13384256
preprocessor_config.json ADDED
@@ -0,0 +1,33 @@
{
  "crop_size": null,
  "data_format": "channels_first",
  "default_to_square": true,
  "device": null,
  "do_center_crop": null,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_processor_type": "Llama4ImageProcessorFast",
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "input_data_format": null,
  "max_patches": 16,
  "processor_class": "Llama4Processor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "resize_to_max_canvas": false,
  "return_tensors": null,
  "size": {
    "height": 336,
    "width": 336
  }
}
processor_config.json ADDED
@@ -0,0 +1,6 @@
{
  "fake_image_token": "<|image|>",
  "image_token": "<|image|>",
  "patch_size": 14,
  "processor_class": "Llama4Processor"
}
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|eot|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|finetune_right_pad_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:172c9eb4beafc72601690da3ccfcede5c2e6806a8d5ec1fca33e22acea8023a4
size 27948578
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff