Spaces:
Paused
Paused
removing zerogpu (moving to archive)
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ from janus.janusflow.models import MultiModalityCausalLM, VLChatProcessor
|
|
4 |
from PIL import Image
|
5 |
from diffusers.models import AutoencoderKL
|
6 |
import numpy as np
|
7 |
-
import spaces # Import spaces for ZeroGPU compatibility
|
8 |
|
9 |
cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
10 |
|
@@ -22,7 +22,7 @@ vae = vae.to(torch.bfloat16).to(cuda_device).eval()
|
|
22 |
|
23 |
# Multimodal Understanding function
|
24 |
@torch.inference_mode()
|
25 |
-
@spaces.GPU(duration=120)
|
26 |
# Multimodal Understanding function
|
27 |
def multimodal_understanding(image, question, seed, top_p, temperature):
|
28 |
# Clear CUDA cache before generating
|
@@ -69,7 +69,7 @@ def multimodal_understanding(image, question, seed, top_p, temperature):
|
|
69 |
|
70 |
|
71 |
@torch.inference_mode()
|
72 |
-
@spaces.GPU(duration=120)
|
73 |
def generate(
|
74 |
input_ids,
|
75 |
cfg_weight: float = 2.0,
|
@@ -152,7 +152,7 @@ def unpack(dec, width, height, parallel_size=5):
|
|
152 |
|
153 |
|
154 |
@torch.inference_mode()
|
155 |
-
@spaces.GPU(duration=120)
|
156 |
def generate_image(prompt,
|
157 |
seed=None,
|
158 |
guidance=5,
|
|
|
4 |
from PIL import Image
|
5 |
from diffusers.models import AutoencoderKL
|
6 |
import numpy as np
|
7 |
+
# import spaces # Import spaces for ZeroGPU compatibility
|
8 |
|
9 |
cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
10 |
|
|
|
22 |
|
23 |
# Multimodal Understanding function
|
24 |
@torch.inference_mode()
|
25 |
+
# @spaces.GPU(duration=120)
|
26 |
# Multimodal Understanding function
|
27 |
def multimodal_understanding(image, question, seed, top_p, temperature):
|
28 |
# Clear CUDA cache before generating
|
|
|
69 |
|
70 |
|
71 |
@torch.inference_mode()
|
72 |
+
# @spaces.GPU(duration=120)
|
73 |
def generate(
|
74 |
input_ids,
|
75 |
cfg_weight: float = 2.0,
|
|
|
152 |
|
153 |
|
154 |
@torch.inference_mode()
|
155 |
+
# @spaces.GPU(duration=120)
|
156 |
def generate_image(prompt,
|
157 |
seed=None,
|
158 |
guidance=5,
|