Update app.py
app.py
CHANGED
@@ -74,15 +74,15 @@ OBJ_ID = 0
 sam2_checkpoint = "checkpoints/edgetam.pt"
 model_cfg = "edgetam.yaml"
 predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device="cpu")
-predictor.to("cuda")
+predictor.to("cpu")
 print("predictor loaded")
 
 # use bfloat16 for the entire demo
-torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
-if torch.cuda.get_device_properties(0).major >= 8:
-    # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
-    torch.backends.cuda.matmul.allow_tf32 = True
-    torch.backends.cudnn.allow_tf32 = True
+torch.autocast(device_type="cpu", dtype=torch.bfloat16).__enter__()
+# if torch.cuda.get_device_properties(0).major >= 8:
+#     # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
+#     torch.backends.cuda.matmul.allow_tf32 = True
+#     torch.backends.cudnn.allow_tf32 = True
 
 
 def get_video_fps(video_path):
@@ -189,7 +189,7 @@ def preprocess_video_in(video_path, session_state):
     ]
 
 
-
+
 def segment_with_points(
     point_type,
     session_state,
@@ -244,7 +244,7 @@ def segment_with_points(
     mask_image = show_mask((out_mask_logits[0] > 0.0).cpu().numpy())
     first_frame_output = Image.alpha_composite(transparent_background, mask_image)
 
-    torch.cuda.empty_cache()
+    # torch.cuda.empty_cache()
     return selected_point_map, first_frame_output, session_state
 
 
@@ -303,7 +303,7 @@ def propagate_to_all(
         output_frame = np.array(output_frame)
         output_frames.append(output_frame)
 
-    torch.cuda.empty_cache()
+    # torch.cuda.empty_cache()
 
     # Create a video clip from the image sequence
     original_fps = get_video_fps(video_in)
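The first hunk pins everything to CPU by hard-coding the device string and commenting out the CUDA-only TF32 switches. If the demo ever needs to run on both backends, the device can instead be chosen once at startup. A minimal sketch, assuming the same `build_sam2_video_predictor` entry point and checkpoint layout as above (the import path is an assumption); everything else is standard `torch`:

```python
import torch

from sam2.build_sam import build_sam2_video_predictor  # import path assumed

# Choose the device once; every later call follows it.
device = "cuda" if torch.cuda.is_available() else "cpu"

sam2_checkpoint = "checkpoints/edgetam.pt"
model_cfg = "edgetam.yaml"
predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device)
predictor.to(device)

# bfloat16 autocast is supported on both the CPU and CUDA backends.
torch.autocast(device_type=device, dtype=torch.bfloat16).__enter__()

if device == "cuda" and torch.cuda.get_device_properties(0).major >= 8:
    # TF32 only applies to Ampere (compute capability 8.x) and newer GPUs.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
```

Written this way, switching deployments is a change to `device` alone rather than an edit at every call site.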
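Entering the autocast context with a bare `__enter__()` leaves bfloat16 active for the life of the process, which is fine for a demo that does nothing else. For code that mixes in precision-sensitive work, the scoped form is easier to reason about. A sketch, with `fn` standing in for whatever predictor call the app makes (`run_scoped` is a hypothetical helper, not part of the app):

```python
import torch

def run_scoped(fn, *args, **kwargs):
    # Hypothetical helper: run one inference call under bfloat16 autocast
    # and inference mode, rather than enabling autocast process-wide.
    with torch.autocast(device_type="cpu", dtype=torch.bfloat16), torch.inference_mode():
        return fn(*args, **kwargs)
```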
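The last two hunks comment out `torch.cuda.empty_cache()`, which has no cache to release on a CPU-only host. If the same file has to serve both CPU and GPU deployments, guarding the call keeps a single code path; a sketch using only standard `torch` calls (`free_cuda_cache` is a hypothetical helper):

```python
import torch

def free_cuda_cache() -> None:
    # Hypothetical helper: release cached GPU memory when CUDA is present;
    # a no-op on CPU-only hosts.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
```

Calling `free_cuda_cache()` at the two patched sites in `segment_with_points` and `propagate_to_all` would keep the GPU behavior intact while still fixing the CPU path.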