Space status: Runtime error
FlawedLLM committed: Update app.py

app.py CHANGED
@@ -6,7 +6,7 @@ import torch
 # from peft import PeftModel, PeftConfig
 
 
-tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")
+# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")
 # quantization_config = BitsAndBytesConfig(
 # load_in_4bit=True,
 # bnb_4bit_use_double_quant=True,
@@ -34,8 +34,15 @@ tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")
 
 
 
-model = AutoModel.from_pretrained("FlawedLLM/Bhashini", load_in_4bit=True, device_map='auto')
-
+# model = AutoModel.from_pretrained("FlawedLLM/Bhashini", load_in_4bit=True, device_map='auto')
+# I highly do NOT suggest - use Unsloth if possible
+from peft import AutoPeftModelForCausalLM
+from transformers import AutoTokenizer
+model = AutoPeftModelForCausalLM.from_pretrained(
+    "FlawedLLM/Bhashini", # YOUR MODEL YOU USED FOR TRAINING
+    load_in_4bit = load_in_4bit,
+)
+tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")
 
 @spaces.GPU(duration=300)
 def chunk_it(input_command):
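Note on the committed change: the new call passes `load_in_4bit = load_in_4bit`, but no `load_in_4bit` variable is defined anywhere in app.py, so the module raises a NameError at import time, which is consistent with the Space's Runtime error status. Below is a minimal sketch of the loading step with the flag set explicitly; the explicit `load_in_4bit=True` and the carried-over `device_map="auto"` are assumptions, not the code as committed.

from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load the LoRA adapter repo together with its base model in 4-bit.
model = AutoPeftModelForCausalLM.from_pretrained(
    "FlawedLLM/Bhashini",  # adapter repo used for training
    load_in_4bit=True,     # explicit flag; the committed code references an undefined variable
    device_map="auto",     # assumption: keep automatic placement from the replaced AutoModel call
)
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini")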