vakodiya committed on
Commit
bea725e
·
verified ·
1 Parent(s): 6f245d4

Update generate_answer.py

Browse files
Files changed (1) hide show
  1. generate_answer.py +2 -0
generate_answer.py CHANGED
@@ -1,7 +1,9 @@
1
  # from transformers import AutoTokenizer, AutoModelForCausalLM
2
  from transformers import AutoModelForCausalLM, GemmaTokenizer
3
  from langchain.prompts import PromptTemplate
 
4
 
 
5
  # model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
6
  # tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
7
 
 
# from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import AutoModelForCausalLM, GemmaTokenizer
from langchain.prompts import PromptTemplate
import os

# Re-export the Hugging Face token so downstream `transformers` calls can
# authenticate. `os.getenv` returns None when the variable is unset, and
# assigning None into os.environ raises TypeError ("str expected, not
# NoneType"), so read once and guard before writing.
# NOTE(review): as originally written this read and re-wrote the SAME
# variable — a no-op when the token is already present. Presumably the
# intent is to surface a token injected by the hosting platform's secret
# store; confirm the deployment actually sets HF_TOKEN.
_hf_token = os.getenv("HF_TOKEN")
if _hf_token:
    os.environ["HF_TOKEN"] = _hf_token

# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)