jcms-bits committed on
Commit be8b8e9 · verified · 1 Parent(s): 39e929b

Change made from the browser


Fix for the model path

Files changed (1)
  app.py  +6 -2
app.py CHANGED
@@ -4,16 +4,20 @@ from langchain.schema import AIMessage, HumanMessage, SystemMessage
 import gradio as gr
 
 # Define the model path
-model_path = "./model/Model-1.2B-Q8_0.gguf"
+space_model_path = "./model/llama-3.2-1b-instruct-q8_0.gguf"
+model_path = "hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF"
+file_name = "llama-3.2-1b-instruct-q8_0.gguf"
+Llama.from_pretrained(repo_id = model_path, filename=file_name, local_dir="./model")
 system_message = "You are a helpful assistant who acts like a pirate."
 llm = LlamaCpp(
-    model_path=model_path,
+    model_path=space_model_path,
     temperature=0.8,
     max_tokens=250,
     top_p=0.6,
     verbose=True
 )
 
+
 def stream_response(message, history):
     print(f"Input: {message}. History: {history}\n")
 
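For reference, below is a minimal sketch of how the top of app.py reads once this commit is applied. The imports sit above the hunk and are not shown in the diff, so the llama_cpp and LangChain import lines are assumptions rather than part of the commit; the lines from "# Define the model path" onward mirror the committed code.

# Minimal sketch of the top of app.py after this commit (imports above the
# hunk are assumed, not taken from the diff).
from llama_cpp import Llama                     # assumed: needed for Llama.from_pretrained
from langchain_community.llms import LlamaCpp   # assumed import location for LlamaCpp
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import gradio as gr

# Define the model path
space_model_path = "./model/llama-3.2-1b-instruct-q8_0.gguf"
model_path = "hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF"
file_name = "llama-3.2-1b-instruct-q8_0.gguf"

# Download the GGUF file from the Hub into ./model so that it lands at the
# local path space_model_path points to.
Llama.from_pretrained(repo_id=model_path, filename=file_name, local_dir="./model")

system_message = "You are a helpful assistant who acts like a pirate."
llm = LlamaCpp(
    model_path=space_model_path,
    temperature=0.8,
    max_tokens=250,
    top_p=0.6,
    verbose=True,
)

Note that Llama.from_pretrained returns a Llama instance, so in this change it is used only for its download side effect: with local_dir="./model" the GGUF file is saved where space_model_path points, and the model is then loaded from that local file through LangChain's LlamaCpp wrapper.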