LPX55 commited on
Commit
97af200
·
1 Parent(s): 8e20124
Files changed (2) hide show
  1. app.py +0 -1
  2. sam2_mask.py +0 -69
app.py CHANGED
@@ -347,7 +347,6 @@ def clear_cache():
347
  torch.cuda.empty_cache()
348
  return gr.update(value="Cache cleared!")
349
 
350
- sam2_mask_tab = create_sam2_mask_interface()
351
 
352
  css = """
353
  .gradio-container {
 
347
  torch.cuda.empty_cache()
348
  return gr.update(value="Cache cleared!")
349
 
 
350
 
351
  css = """
352
  .gradio-container {
sam2_mask.py CHANGED
@@ -1,69 +0,0 @@
1
- # K-I-S-S
2
- import spaces
3
- import gradio as gr
4
- from gradio_image_prompter import ImagePrompter
5
- from sam2.sam2_image_predictor import SAM2ImagePredictor
6
- import torch
7
- import numpy as np
8
- from PIL import Image as PILImage
9
-
10
# Initialize SAM2 predictor.
MODEL = "facebook/sam2.1-hiera-large"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the predictor once at import time. predict_masks() below references
# PREDICTOR, but the original file never defined it, which guarantees a
# NameError on the first prediction — create it here from the configured model.
PREDICTOR = SAM2ImagePredictor.from_pretrained(MODEL)
13
-
14
@spaces.GPU()
def predict_masks(image, points):
    """Predict a single segmentation mask and return it blended over the image.

    Args:
        image: Input image (PIL image or array-like convertible by np.array).
            Assumed to be RGB (H, W, 3) — the red-channel overlay below
            indexes channel 0; TODO confirm upstream always supplies RGB.
        points: Iterable of dicts with "x" and "y" pixel coordinates. Every
            point is treated as a positive (foreground) click.

    Returns:
        np.ndarray: the image blended 50/50 with a red mask overlay, or the
        unmodified image array when no points were given or no mask came back.
    """
    image_np = np.array(image)

    # Nothing selected yet — predicting on zero points would fail, so just
    # echo the input back.
    if not points:
        return image_np

    # SAM2's predict() documents numpy arrays for coords/labels, not lists.
    point_coords = np.array([[point["x"], point["y"]] for point in points])
    point_labels = np.ones(len(point_coords), dtype=np.int32)  # 1 = foreground

    with torch.inference_mode():
        PREDICTOR.set_image(image_np)
        masks, _, _ = PREDICTOR.predict(
            point_coords=point_coords,
            point_labels=point_labels,
            multimask_output=False,
        )

    # NOTE: `masks` is a numpy array; the original `if masks and ...` raises
    # "truth value of an array is ambiguous" for multi-element arrays. Test
    # presence and non-emptiness explicitly instead.
    if masks is not None and len(masks) > 0:
        # Paint the first mask into the red channel and alpha-blend it over
        # the original image.
        red_mask = np.zeros_like(image_np)
        red_mask[:, :, 0] = masks[0].astype(np.uint8) * 255
        red_mask = PILImage.fromarray(red_mask)
        original_image = PILImage.fromarray(image_np)
        blended_image = PILImage.blend(original_image, red_mask, alpha=0.5)
        return np.array(blended_image)
    return image_np
38
-
39
-
40
def create_sam2_mask_interface():
    """Build and return the Gradio Blocks tab for SAM2 mask generation."""

    def update_mask(prompts):
        """Unpack the ImagePrompter payload and run segmentation."""
        return predict_masks(prompts["image"], prompts["points"])

    with gr.Blocks() as sam2_mask_tab:
        gr.Markdown("# Object Segmentation with SAM2")
        gr.Markdown(
            """
            This application utilizes **Segment Anything V2 (SAM2)** to allow you to upload an image and interactively generate a segmentation mask based on multiple points you select on the image.
            """
        )

        # Two-column layout: prompt input on the left, result on the right.
        with gr.Row():
            with gr.Column():
                upload_image_input = ImagePrompter(show_label=False)
            with gr.Column():
                image_output = gr.Image(
                    label="Segmented Image", type="pil", height=400
                )

        # Re-run segmentation whenever the image or its points change.
        upload_image_input.change(
            fn=update_mask,
            inputs=[upload_image_input],
            outputs=[image_output],
            show_progress=True,
        )

    return sam2_mask_tab