Alessio Grancini committed on
Commit
f796a73
·
verified ·
1 Parent(s): 85008f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -8
app.py CHANGED
@@ -11,18 +11,37 @@ from image_segmenter import ImageSegmenter
11
  from monocular_depth_estimator import MonocularDepthEstimator
12
  from point_cloud_generator import display_pcd
13
 
14
- import os
15
- import torch
16
 
17
- # Let ZeroGPU handle initialization
18
- try:
19
- device = torch.device("cuda" if spaces.is_gpu_available() else "cpu")
20
- print(f"🔹 Running inference on {device}")
21
- except Exception as e:
22
- print(f"❌ ZeroGPU failed to initialize: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  device = torch.device("cpu")
24
 
25
 
 
26
  # params
27
  CANCEL_PROCESSING = False
28
 
 
11
  from monocular_depth_estimator import MonocularDepthEstimator
12
  from point_cloud_generator import display_pcd
13
 
 
 
14
 
15
+
16
+ import subprocess
17
+
18
# Check whether an NVIDIA GPU is visible at the driver level.
def check_gpu():
    """Return True if `nvidia-smi` runs and reports a GPU, else False.

    This probes the NVIDIA driver tooling directly; a True result does
    NOT guarantee that the installed PyTorch build has CUDA support —
    callers should additionally check torch.cuda.is_available().
    """
    try:
        # A working driver prints a banner containing "NVIDIA-SMI" on stdout.
        # NOTE(review): returncode is deliberately ignored — a non-zero exit
        # with no banner falls through to the "no GPU" branch below.
        gpu_check = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
        if "NVIDIA-SMI" in gpu_check.stdout:
            print("✅ GPU is available!")
            return True
        print("❌ No GPU detected!")
        return False
    except OSError:
        # FileNotFoundError (nvidia-smi not on PATH) is the common case, but
        # any OS-level launch failure (e.g. PermissionError) equally means
        # the GPU is unavailable — don't let it crash module import.
        print("❌ No nvidia-smi found! GPU likely unavailable.")
        return False
33
# Set the global torch device based on GPU availability.
# BUGFIX: this commit removed the top-level `import os` / `import torch`
# lines while this code still uses both — import them here so the module
# loads without a NameError regardless of what remains above.
import os
import torch

# Use CUDA only when BOTH the driver (nvidia-smi) and the PyTorch CUDA
# build agree a GPU is usable; otherwise pin everything to CPU.
if check_gpu() and torch.cuda.is_available():
    print(f"✅ CUDA available: {torch.cuda.get_device_name(0)}")
    device = torch.device("cuda")
    # Free any cached allocations left over from a previous run.
    torch.cuda.empty_cache()
else:
    print("❌ No CUDA available. Falling back to CPU.")
    # Hide all GPUs from CUDA-aware libraries loaded after this point.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    device = torch.device("cpu")
42
 
43
 
44
+
45
  # params
46
  CANCEL_PROCESSING = False
47