Update app.py
app.py
CHANGED
@@ -44,12 +44,6 @@ from utils import get_sentence, generate_speech_for_sentence, wave_header_chunk
 print("Loading Whisper ASR")
 whisper_model = WhisperModel("large-v3", device="cuda", compute_type="float16")
 
-# Load Mistral LLM
-print("Loading Mistral LLM")
-hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF", local_dir=".", filename="mistral-7b-instruct-v0.1.Q5_K_M.gguf")
-mistral_model_path="./mistral-7b-instruct-v0.1.Q5_K_M.gguf"
-mistral_llm = Llama(model_path=mistral_model_path, n_gpu_layers=35, max_new_tokens=256, context_window=4096, n_ctx=4096, n_batch=128, verbose=False)
-
 # Load Saul-Instruct-v1-GGUF.Q4_K_M
 print("Loading Saul-Instruct-v1-GGUF.Q4_K_M")
 hf_hub_download(repo_id="MaziyarPanahi/Saul-Instruct-v1-GGUF", local_dir=".", filename="Saul-Instruct-v1.Q4_K_M.gguf")
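This hunk removes the Mistral-7B-Instruct download and its `Llama(...)` construction while keeping the Saul-Instruct-v1 GGUF download; the `Llama` instantiation for the Saul model itself is outside this hunk. A minimal sketch of how that loading could look with llama-cpp-python, mirroring the removed Mistral block (the variable name `saul_llm` and the parameter values are illustrative, not taken from this commit):

from llama_cpp import Llama

# Path matches the filename fetched by hf_hub_download above (illustrative)
saul_model_path = "./Saul-Instruct-v1.Q4_K_M.gguf"

# Construct the llama.cpp model; settings mirror the removed Mistral block
saul_llm = Llama(
    model_path=saul_model_path,
    n_gpu_layers=35,   # number of transformer layers offloaded to the GPU
    n_ctx=4096,        # context window size in tokens
    n_batch=128,       # prompt-processing batch size
    verbose=False,
)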