Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -7,9 +7,10 @@ import torch
|
|
7 |
from transformers import MllamaForConditionalGeneration, AutoProcessor
|
8 |
from OmniGen import OmniGenPipeline
|
9 |
|
|
|
10 |
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")
|
11 |
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
|
12 |
-
model = MllamaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto",)
|
13 |
processor = AutoProcessor.from_pretrained(model_id)
|
14 |
|
15 |
@spaces.GPU()
|
|
|
7 |
from transformers import MllamaForConditionalGeneration, AutoProcessor
|
8 |
from OmniGen import OmniGenPipeline
|
9 |
|
10 |
+
Llama32V_HFtoken = os.getenv("Llama32V")
|
11 |
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")
|
12 |
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
|
13 |
+
model = MllamaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", token=Llama32V_HFtoken)
|
14 |
processor = AutoProcessor.from_pretrained(model_id)
|
15 |
|
16 |
@spaces.GPU()
|