Spaces:
Sleeping
Sleeping
looker01202
committed on
Commit
·
5e9cf4c
1
Parent(s):
363a2e4
using kwargs2
Browse files
app.py
CHANGED
@@ -78,6 +78,21 @@ def load_model():
|
|
78 |
device_map="auto"
|
79 |
)
|
80 |
print(f"✅ Loaded HF {primary_checkpoint}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
#apply_template_from_file(tokenizer, CORRECTED_TEMPLATE_FILENAME)
|
82 |
return tokenizer, model, model_name_display # Use HF checkpoint name for display
|
83 |
|
@@ -97,6 +112,23 @@ def load_model():
|
|
97 |
# Load HF Tokenizer (needed for apply_chat_template)
|
98 |
tokenizer = AutoTokenizer.from_pretrained(primary_checkpoint, use_fast=True)
|
99 |
print("✅ Loaded HF Tokenizer for template application.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
#apply_template_from_file(tokenizer, CORRECTED_TEMPLATE_FILENAME)
|
101 |
|
102 |
# Load GGUF Model using ctransformers, downloading from Hub
|
|
|
78 |
device_map="auto"
|
79 |
)
|
80 |
print(f"✅ Loaded HF {primary_checkpoint}")
|
81 |
+
|
82 |
+
# --- ADD THIS DEBUG BLOCK ---
|
83 |
+
print("--- Inspecting Tokenizer's Default Chat Template ---")
|
84 |
+
if hasattr(tokenizer, 'chat_template') and tokenizer.chat_template:
|
85 |
+
print(tokenizer.chat_template)
|
86 |
+
else:
|
87 |
+
# If no chat_template, check tokenizer_config.json (less direct)
|
88 |
+
if hasattr(tokenizer, 'special_tokens_map'):
|
89 |
+
print("Tokenizer has no 'chat_template' attribute. Special tokens map:")
|
90 |
+
print(tokenizer.special_tokens_map)
|
91 |
+
else:
|
92 |
+
print("Tokenizer does not have a 'chat_template' attribute or it is empty/None.")
|
93 |
+
print("----------------------------------------------------")
|
94 |
+
# --- END DEBUG BLOCK ---
|
95 |
+
|
96 |
#apply_template_from_file(tokenizer, CORRECTED_TEMPLATE_FILENAME)
|
97 |
return tokenizer, model, model_name_display # Use HF checkpoint name for display
|
98 |
|
|
|
112 |
# Load HF Tokenizer (needed for apply_chat_template)
|
113 |
tokenizer = AutoTokenizer.from_pretrained(primary_checkpoint, use_fast=True)
|
114 |
print("✅ Loaded HF Tokenizer for template application.")
|
115 |
+
|
116 |
+
# Inside load_model function...
|
117 |
+
|
118 |
+
# --- ADD THIS DEBUG BLOCK ---
|
119 |
+
print("--- Inspecting Tokenizer's Default Chat Template ---")
|
120 |
+
if hasattr(tokenizer, 'chat_template') and tokenizer.chat_template:
|
121 |
+
print(tokenizer.chat_template)
|
122 |
+
else:
|
123 |
+
# If no chat_template, check tokenizer_config.json (less direct)
|
124 |
+
if hasattr(tokenizer, 'special_tokens_map'):
|
125 |
+
print("Tokenizer has no 'chat_template' attribute. Special tokens map:")
|
126 |
+
print(tokenizer.special_tokens_map)
|
127 |
+
else:
|
128 |
+
print("Tokenizer does not have a 'chat_template' attribute or it is empty/None.")
|
129 |
+
print("----------------------------------------------------")
|
130 |
+
# --- END DEBUG BLOCK ---
|
131 |
+
|
132 |
#apply_template_from_file(tokenizer, CORRECTED_TEMPLATE_FILENAME)
|
133 |
|
134 |
# Load GGUF Model using ctransformers, downloading from Hub
|