SaulLM path adjustment
Browse files
app.py
CHANGED
@@ -52,7 +52,7 @@ mistral_llm = Llama(model_path=mistral_model_path,n_gpu_layers=35,max_new_tokens
|
|
52 |
|
53 |
# Load Saul-Instruct-v1-GGUF.Q4_K_M
|
54 |
print("Loading Saul-Instruct-v1-GGUF.Q4_K_M")
|
55 |
-
hf_hub_download(repo_id="MaziyarPanahi/Saul-Instruct-v1-GGUF", local_dir=".", filename="Saul-Instruct-v1-GGUF.Q4_K_M.gguf")
|
56 |
saul_model_path="./Saul-Instruct-v1-GGUF.Q4_K_M.gguf"
|
57 |
saul_instruct_llm = Llama(model_path=saul_model_path,n_gpu_layers=35,max_new_tokens=256, context_window=4096, n_ctx=32768,n_batch=128,verbose=False)
|
58 |
|
|
|
52 |
|
53 |
# Load Saul-Instruct-v1-GGUF.Q4_K_M
|
54 |
print("Loading Saul-Instruct-v1-GGUF.Q4_K_M")
|
55 |
+
hf_hub_download(repo_id="MaziyarPanahi/Saul-Instruct-v1-GGUF", local_dir=".", filename="Saul-Instruct-v1.Q4_K_M.gguf")
|
56 |
saul_model_path="./Saul-Instruct-v1-GGUF.Q4_K_M.gguf"
|
57 |
saul_instruct_llm = Llama(model_path=saul_model_path,n_gpu_layers=35,max_new_tokens=256, context_window=4096, n_ctx=32768,n_batch=128,verbose=False)
|
58 |
|