sandz7 committed
Commit 99033fb · 1 Parent(s): d5cc98b

added login for HF

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -1,10 +1,12 @@
 import torch
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
+from huggingface_hub import login
 import os

-# HF_TOKEN
-HF_AUTH_TOKEN = os.getenv('HF_AUTH_TOKEN')
+TOKEN = os.getenv('HF_AUTH_TOKEN')
+login(token=TOKEN,
+      add_to_git_credential=False)

 # Open ai api key
 API_KEY = os.getenv('OPEN_AI_API_KEY')
@@ -18,7 +20,7 @@ DESCRIPTION = '''

 # Place transformers in hardware to prepare for process and generation
 llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
-llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.float16).to('cuda')
+llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", token=TOKEN, torch_dtype=torch.float16).to('cuda')
 terminators = [
     llama_tokenizer.eos_token_id,
     llama_tokenizer.convert_tokens_to_ids("<|eot_id|>")
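For reference, a minimal sketch of the authentication flow this commit introduces, assuming HF_AUTH_TOKEN is set in the environment (e.g. as a Space secret) and has read access to the gated meta-llama repo; the code mirrors the diff, with explanatory comments added:

import os
import torch
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM

# Token read from the environment (assumed to be configured as a secret
# named HF_AUTH_TOKEN with access to meta-llama/Meta-Llama-3-8B-Instruct).
TOKEN = os.getenv('HF_AUTH_TOKEN')

# Authenticate this process with the Hugging Face Hub so gated files can be
# downloaded; add_to_git_credential=False keeps the token out of the git
# credential store.
login(token=TOKEN, add_to_git_credential=False)

# The tokenizer download relies on the login() above; the model call also
# passes token= explicitly, which authenticates that request on its own.
llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
llama_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    token=TOKEN,
    torch_dtype=torch.float16,
).to('cuda')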