Spaces: Running on Zero
loading image inside inference func
hfapp.py CHANGED
@@ -15,8 +15,8 @@ from app import (
 @spaces.GPU
 @torch.no_grad
 def run_inference(model, img):
+    img = img.float().to('cuda')
     model = model.to('cuda')
-    img = img.to('cuda')
     print("model on cuda:", next(model.scorenet.net.parameters()).is_cuda)
     print("img on cuda:", img.is_cuda)
     img = torch.nn.functional.interpolate(img, size=64, mode="bilinear")
@@ -28,11 +28,9 @@ def run_inference(model, img):
 
 
 def localize_anomalies(input_img, preset="edm2-img64-s-fid", load_from_hub=False):
-    device = "cuda"
     input_img = input_img.resize(size=(64, 64), resample=Image.Resampling.LANCZOS)
     img = np.array(input_img)
     img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0)
-    img = img.float().to(device)
     model, modeldir = load_model_from_hub(preset=preset, device=device)
     img_likelihood, score_norms = run_inference(model, img)
     nll, pct, ref_nll = compute_gmm_likelihood(
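
On ZeroGPU Spaces, a CUDA device is only attached while a function decorated with @spaces.GPU is executing, which matches the commit title: the .to('cuda') transfer of the image moves out of localize_anomalies and into run_inference. Below is a minimal sketch of that pattern, not the Space's actual code; it assumes the spaces package from the Hugging Face ZeroGPU SDK and uses a stand-in torch.nn.Linear in place of the real score model loaded from app.py.

import spaces
import torch

# Stand-in model, built on the CPU at import time
# (the real app loads its model from the Hub instead).
model = torch.nn.Linear(64, 64)

@spaces.GPU          # a GPU is only allocated while this function runs
@torch.no_grad()
def run_inference(model, img):
    # Move both the model and the input to CUDA inside the decorated
    # function; outside of it, a ZeroGPU Space exposes no CUDA device.
    model = model.to("cuda")
    img = img.float().to("cuda")
    return model(img).cpu()

out = run_inference(model, torch.randn(1, 64))

With this layout, CPU-side callers such as localize_anomalies can prepare the tensor with NumPy/PIL and never touch CUDA themselves; all device placement is confined to the @spaces.GPU-decorated scope.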