# command-r / model.yml
name: command-r:32b
model: command-r:32b
version: 1
# Results Preferences
top_p: 0.95
temperature: 0.7
frequency_penalty: 0
presence_penalty: 0
max_tokens: 131072 # Maximum tokens to generate per response; inferred from base config.json -> max_position_embeddings
stream: true # true | false
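# Note: these sampling defaults can usually be overridden per request; with the
# llama-cpp engine they map onto the matching OpenAI-style completion fields
# (top_p, temperature, frequency_penalty, presence_penalty, max_tokens, stream).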
# Engine / Model Settings
ngl: 33 # Number of model layers to offload to GPU (llama.cpp -ngl); layer count comes from config.json -> num_hidden_layers, not num_attention_heads
ctx_len: 131072 # Inferred from base config.json -> max_position_embeddings
engine: llama-cpp
prompt_template: "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
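# Illustrative rendering of the template above with {prompt} = "Hello":
#   <|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
# Generation continues after <|CHATBOT_TOKEN|> with the model's reply.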