alexanderchemeris committed
Commit b7a223e · 1 Parent(s): f550939

feat: Use unique model_type chatts instead of reusing qwen2

Files changed (2)
  1. config.json +1 -1
  2. configuration_qwen2.py +1 -1
config.json CHANGED
@@ -19,7 +19,7 @@
   "intermediate_size": 13824,
   "max_position_embeddings": 32768,
   "max_window_layers": 70,
- "model_type": "qwen2",
+ "model_type": "chatts",
   "num_attention_heads": 40,
   "num_hidden_layers": 48,
   "num_key_value_heads": 8,
configuration_qwen2.py CHANGED
@@ -93,7 +93,7 @@ class Qwen2TSConfig(PretrainedConfig):
     >>> configuration = model.config
     ```"""
 
-    model_type = "qwen2"
+    model_type = "chatts"
     keys_to_ignore_at_inference = ["past_key_values"]
 
     def __init__(
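Because Qwen2TSConfig now advertises model_type = "chatts", it can be registered with the Auto classes without clashing with the stock qwen2 mapping. A minimal sketch for local use, assuming configuration_qwen2.py and a matching modeling module are importable; the modeling module and model class names here are illustrative, not taken from the repository:

from transformers import AutoConfig, AutoModelForCausalLM
from configuration_qwen2 import Qwen2TSConfig
# Hypothetical import; the actual modeling module/class name may differ.
from modeling_qwen2 import Qwen2TSForCausalLM

# Map the new "chatts" model_type to the custom config and model classes
# so AutoConfig/AutoModelForCausalLM resolve it alongside the built-in qwen2 entry.
AutoConfig.register("chatts", Qwen2TSConfig)
AutoModelForCausalLM.register(Qwen2TSConfig, Qwen2TSForCausalLM)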