Update README.md
README.md CHANGED
@@ -26,6 +26,21 @@ import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig

+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16
+)
+
+model = "aiplanet/panda-coder-13B"
+
+base_model = AutoModelForCausalLM.from_pretrained(model, quantization_config=bnb_config, device_map="cuda")
+
+tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)
+tokenizer.pad_token = tokenizer.eos_token
+tokenizer.padding_side = "right"
+
prompt = f"""### Instruction:
Below is an instruction that describes a task. Write a response that appropriately completes the request.
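
The hunk above ends mid-template, so here is a minimal end-to-end sketch of how the added lines might be used for generation. It assumes the standard Hugging Face `generate()` API; the example instruction and the `### Response:` marker are illustrative assumptions and are not taken from the README diff.

```python
# Minimal usage sketch, assuming the standard Transformers generate() API.
# The instruction text and "### Response:" terminator below are hypothetical.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit NF4 quantization config, mirroring the lines added in the diff above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model_id = "aiplanet/panda-coder-13B"
base_model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="cuda"
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"

# Hypothetical instruction; replace with your own task.
prompt = """### Instruction:
Below is an instruction that describes a task. Write a response that appropriately completes the request.
Write a Python function that reverses a string.

### Response:
"""

# Tokenize, generate, and decode the completion.
inputs = tokenizer(prompt, return_tensors="pt").to(base_model.device)
with torch.no_grad():
    output_ids = base_model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Loading the 13B model in 4-bit NF4 with double quantization keeps its memory footprint small enough for a single GPU, which is presumably why the README pairs `BitsAndBytesConfig` with `device_map="cuda"`.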