Upload folder using huggingface_hub
model.yml CHANGED

@@ -1,14 +1,14 @@
 # BEGIN GENERAL GGUF METADATA
 id: qwen2.5-coder
 model: qwen2.5-coder
-name: qwen2.5-coder
+name: qwen2.5-coder:14b-gguf-q4-km
 version: 1
 # END GENERAL GGUF METADATA

 # BEGIN INFERENCE PARAMETERS
 # BEGIN REQUIRED
 stop:
-  - <|im_end|>
+  - <|im_end|>
 # END REQUIRED

 # BEGIN OPTIONAL
@@ -40,8 +40,9 @@ min_keep: 0
 # BEGIN MODEL LOAD PARAMETERS
 # BEGIN REQUIRED
 engine: llama-cpp
-prompt_template:
+prompt_template:
+  <|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n
 ctx_len: 4096
-ngl:
+ngl: 49
 # END REQUIRED
 # END MODEL LOAD PARAMETERS
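The commit fills in the previously empty required load parameters for the llama-cpp engine: a fully qualified name for the 14B Q4_K_M GGUF build, ngl (the number of layers to offload to the GPU) set to 49, and a ChatML-style prompt_template with {system_message} and {prompt} placeholders, which is the chat format used by Qwen2.5 instruct models.

Below is a minimal sketch, not taken from this repository, of how such a template could be rendered at inference time. It assumes the literal \n sequences in model.yml become real newlines and that the two placeholders are substituted verbatim; the render_prompt helper is hypothetical.

    # Illustrative only: render the ChatML prompt_template from model.yml.
    template = (
        "<|im_start|>system\n{system_message}<|im_end|>\n"
        "<|im_start|>user\n{prompt}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

    def render_prompt(system_message: str, prompt: str) -> str:
        """Substitute the two placeholders used by the template."""
        return template.format(system_message=system_message, prompt=prompt)

    if __name__ == "__main__":
        print(render_prompt(
            "You are a helpful coding assistant.",
            "Write a Python function that reverses a string.",
        ))

The trailing <|im_start|>assistant\n leaves the prompt open for the model's reply, and <|im_end|> matching the stop entry above ends generation at the close of the assistant turn.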