Commit: update
eagle_vl/serve/inference.py
CHANGED
@@ -20,13 +20,14 @@ logger = logging.getLogger(__name__)
 
 def load_model(model_path: str = "NVEagle/Eagle2.5-VL-8B-Preview"):
 
+    token = os.environ.get("HF_TOKEN")
     # hotfix the model to use flash attention 2
-    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
+    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True, use_auth_token=token)
     config._attn_implementation = "flash_attention_2"
     config.vision_config._attn_implementation = "flash_attention_2"
     config.text_config._attn_implementation = "flash_attention_2"
     print("Successfully set the attn_implementation to flash_attention_2")
-
+
     logger.info(f"token = {token[:4]}***{token[-2:]}")
     model = AutoModel.from_pretrained(
         model_path,