Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -4,13 +4,28 @@ import numpy as np
 from transformers import DPTForDepthEstimation, DPTImageProcessor
 import gradio as gr
 import torch.quantization
+import torch.nn.utils.prune as prune
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
 model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256", torch_dtype=torch.float32)
 model.eval()
+
+# Apply global unstructured pruning
+parameters_to_prune = [
+    (module, "weight") for module in filter(lambda m: isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)), model.modules())
+]
+prune.global_unstructured(
+    parameters_to_prune,
+    pruning_method=prune.L1Unstructured,
+    amount=0.4,  # Prune 40% of weights
+)
+
+# Apply quantization after pruning
 model = torch.quantization.quantize_dynamic(
     model, {torch.nn.Linear, torch.nn.Conv2d}, dtype=torch.qint8
 ).to(device)
+
 processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
 
 color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
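One caveat worth flagging on this change, and a possible contributor to the Space's runtime error: prune.global_unstructured does not delete weights. It reparametrizes each module, storing weight_orig and weight_mask buffers and recomputing weight on every forward pass, so it helps to make the masks permanent with prune.remove before quantizing. Separately, PyTorch's dynamic quantization ships kernels for torch.nn.Linear (and recurrent layers) but, to my knowledge, not torch.nn.Conv2d, and quantized kernels run on CPU, so moving the quantized model to CUDA with .to(device) can fail. Below is a minimal sketch of that ordering, reusing parameters_to_prune from the diff above; it illustrates the general recipe, not the app's confirmed fix.

import torch
import torch.nn.utils.prune as prune

# Fold each pruning mask back into a plain `weight` tensor so the
# modules drop the weight_orig/weight_mask reparametrization.
for module, param_name in parameters_to_prune:
    prune.remove(module, param_name)

# Dynamic quantization swaps only supported module types (notably
# torch.nn.Linear); Conv2d layers stay in float32 either way.
# Quantized kernels are CPU-only, so the model is left on the CPU.
model = torch.quantization.quantize_dynamic(
    model, {torch.nn.Linear}, dtype=torch.qint8
)

With this ordering the zeros introduced by pruning survive into the quantized Linear weights, and the CPU-only constraint is respected by never calling .to("cuda") on the quantized model.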