Update app.py
app.py CHANGED
@@ -67,12 +67,17 @@ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, pr
     if language == "italiano":
         default_llm = "google/gemma-7b-it"
     else:
-        default_llm = "gemma-7b"
+        default_llm = "google/gemma-7b"  # English version

-    # Try to load the tokenizer and model
+    # Try to load the tokenizer and model with authentication
     try:
-        tokenizer = AutoTokenizer.from_pretrained(default_llm)
-        model = AutoModelForCausalLM.from_pretrained(default_llm)
+        # Option 1: Using HF_TOKEN environment variable
+        hf_token = os.getenv("HF_TOKEN")
+        if not hf_token:
+            raise ValueError("HF_TOKEN environment variable is not set")
+
+        tokenizer = AutoTokenizer.from_pretrained(default_llm, token=hf_token)
+        model = AutoModelForCausalLM.from_pretrained(default_llm, token=hf_token)
     except Exception as e:
         logger.error(f"Error initializing LLM: {e}")
         return None, "Failed to initialize LLM"
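
google/gemma-7b is a gated repository, so downloading it requires a Hugging Face access token; in a Space the token is normally stored as the HF_TOKEN secret and exposed to app.py as an environment variable, which is what the added code reads. The comment labels this "Option 1"; a second common pattern, shown below as a minimal sketch only (it is not part of this commit), is to register the token once with huggingface_hub.login() so that later from_pretrained() calls are authenticated without an explicit token= argument.

import os

from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: HF_TOKEN is set as a Space secret / environment variable,
# as in the diff above.
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("HF_TOKEN environment variable is not set")

# Register the token with the Hub client once; subsequent downloads reuse it,
# so from_pretrained() no longer needs token=hf_token.
login(token=hf_token)

default_llm = "google/gemma-7b"  # same default_llm as in the diff above
tokenizer = AutoTokenizer.from_pretrained(default_llm)
model = AutoModelForCausalLM.from_pretrained(default_llm)

Passing token= explicitly, as the commit does, keeps the authentication visible at the call site; login() is mainly convenient when several loaders in the app need the same credential.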