GGmorello committed on
Commit
3875a78
·
verified ·
1 Parent(s): 8cbfa15

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -5,9 +5,9 @@ import torch
5
  from peft import PeftConfig, PeftModel
6
  from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig
7
 
8
- config = PeftConfig.from_pretrained("GGmorello/FLAMES")
9
  model = LlamaForCausalLM.from_pretrained(
10
- config.base_model_name_or_path,
11
  quantization_config=BitsAndBytesConfig(
12
  load_in_4bit=True,
13
  bnb_4bit_quant_type="nf4",
@@ -15,7 +15,7 @@ model = LlamaForCausalLM.from_pretrained(
15
  bnb_4bit_compute_dtype=torch.bfloat16,
16
  ),
17
  )
18
- model = PeftModel.from_pretrained(model, "GGmorello/FLAMES")
19
 
20
 
21
  MAX_SEQ_LEN = 4096
 
5
  from peft import PeftConfig, PeftModel
6
  from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig
7
 
8
+ # config = PeftConfig.from_pretrained("GGmorello/FLAMES")
9
  model = LlamaForCausalLM.from_pretrained(
10
+ "GGmorello/FLAMES_100k",
11
  quantization_config=BitsAndBytesConfig(
12
  load_in_4bit=True,
13
  bnb_4bit_quant_type="nf4",
 
15
  bnb_4bit_compute_dtype=torch.bfloat16,
16
  ),
17
  )
18
+ # model = PeftModel.from_pretrained(model, "GGmorello/FLAMES")
19
 
20
 
21
  MAX_SEQ_LEN = 4096