Update app.py
app.py
CHANGED
@@ -4,6 +4,13 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer, AutoModelForCausalLM
 from io import StringIO
 import os
+from huggingface_hub import HfFolder  # needed below for HfFolder.save_token
 
+# Access the Hugging Face API token from environment variables
+hf_token = os.getenv('HF_API_TOKEN')
+
+if not hf_token:
+    raise ValueError("Hugging Face API token is not set. Please set the HF_API_TOKEN environment variable.")
+HfFolder.save_token(hf_token)
 # Load the GPT-2 tokenizer and model
 tokenizer_gpt2 = GPT2Tokenizer.from_pretrained('gpt2')
 model_gpt2 = GPT2LMHeadModel.from_pretrained('gpt2')
@@ -11,7 +18,7 @@ model_gpt2 = GPT2LMHeadModel.from_pretrained('gpt2')
 # Load the Llama3 model in sharded mode
 model_name = "meta-llama/Meta-Llama-3.1-8B"
 try:
-    model_llama = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")  # use device_map for automatic sharding
+    model_llama = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", token=hf_token)  # use device_map for automatic sharding
 except OSError as e:
     print(f"Error loading model: {e}")
 
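For anyone reproducing this commit, here is a rough, self-contained sketch of the pattern it sets up; this is not code from the Space itself. It assumes the HF_API_TOKEN secret is configured (via the Space settings, or `export HF_API_TOKEN=...` locally) and that access to the gated meta-llama/Meta-Llama-3.1-8B repository has already been granted on the Hub.

# Hypothetical usage sketch, not part of the committed app.py.
import os
from transformers import AutoModelForCausalLM, AutoTokenizer

hf_token = os.getenv("HF_API_TOKEN")  # same secret the Space reads
model_name = "meta-llama/Meta-Llama-3.1-8B"

tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",  # let accelerate shard the weights across available devices
    token=hf_token,     # authenticate against the gated repo
)

# accelerate records where each module landed; useful to confirm the sharding
print(model.hf_device_map)

inputs = tokenizer("Hello, Llama!", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))

Passing token= on each call works on its own, but once HfFolder.save_token(hf_token) has run (as the diff does at startup), later from_pretrained calls in the same environment can also pick the token up from the saved credential.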