Cambio desde el navegador
Browse files
Arreglo de dirección del modelo
app.py
CHANGED
@@ -4,16 +4,20 @@ from langchain.schema import AIMessage, HumanMessage, SystemMessage
|
|
4 |
import gradio as gr
|
5 |
|
6 |
# Define the model path
|
7 |
-
|
|
|
|
|
|
|
8 |
system_message = "You are a helpful assistant who acts like a pirate."
|
9 |
llm = LlamaCpp(
|
10 |
-
model_path=
|
11 |
temperature=0.8,
|
12 |
max_tokens=250,
|
13 |
top_p=0.6,
|
14 |
verbose=True
|
15 |
)
|
16 |
|
|
|
17 |
def stream_response(message, history):
|
18 |
print(f"Input: {message}. History: {history}\n")
|
19 |
|
|
|
4 |
import gradio as gr
|
5 |
|
6 |
# Define the model path
|
7 |
+
space_model_path = "./model/llama-3.2-1b-instruct-q8_0.gguf"
|
8 |
+
model_path = "hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF"
|
9 |
+
file_name = "llama-3.2-1b-instruct-q8_0.gguf"
|
10 |
+
Llama.from_pretrained(repo_id = model_path, filename=file_name, local_dir="./model")
|
11 |
system_message = "You are a helpful assistant who acts like a pirate."
|
12 |
llm = LlamaCpp(
|
13 |
+
model_path=space_model_path,
|
14 |
temperature=0.8,
|
15 |
max_tokens=250,
|
16 |
top_p=0.6,
|
17 |
verbose=True
|
18 |
)
|
19 |
|
20 |
+
|
21 |
def stream_response(message, history):
|
22 |
print(f"Input: {message}. History: {history}\n")
|
23 |
|