FlawedLLM committed on
Commit
3bcd53e
·
verified ·
1 Parent(s): b106963

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -2,11 +2,12 @@ import re
2
  from peft import PeftModel, PeftConfig
3
  from transformers import AutoModelForCausalLM
4
 
 
 
5
  config = PeftConfig.from_pretrained("FlawedLLM/BhashiniLLM")
6
  base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
7
  model = PeftModel.from_pretrained(base_model, "FlawedLLM/BhashiniLLM")
8
  tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
9
- @spaces.GPU
10
  def chunk_it(input_command):
11
  inputs = tokenizer(
12
  [
 
2
  from peft import PeftModel, PeftConfig
3
  from transformers import AutoModelForCausalLM
4
 
5
+
6
+ @spaces.GPU
7
  config = PeftConfig.from_pretrained("FlawedLLM/BhashiniLLM")
8
  base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
9
  model = PeftModel.from_pretrained(base_model, "FlawedLLM/BhashiniLLM")
10
  tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
 
11
  def chunk_it(input_command):
12
  inputs = tokenizer(
13
  [