import gradio as gr
import torch
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
from transformers import pipeline

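# Optional sketch: the YOLO model and the depth-estimation pipeline below are
# re-created on every request, which keeps each function self-contained but adds
# latency. If that matters, the models could be cached once per process; the
# helper below is a hypothetical illustration and is not used by the functions
# that follow.
from functools import lru_cache

@lru_cache(maxsize=None)
def _load_segmentation_model(model_name="yolov8n-seg"):
    """Load and cache a YOLO segmentation model (illustrative helper)."""
    from ultralytics import YOLO
    return YOLO(model_name)
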
def preprocess_image(image):
    """Convert the input to an RGB PIL image and resize it to 512x512."""
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # Ensure 3 channels (the blur functions below index channels 0-2)
    image = image.convert("RGB")

    # Resize to a fixed 512x512 (note: this does not preserve the aspect ratio)
    image = image.resize((512, 512))
    return image

def segment_image(image, model_name="yolov8n-seg"):
    """
    Perform instance segmentation on the input image using YOLO segmentation model.
    """
    from ultralytics import YOLO
    
    # Load the YOLO segmentation model (note: weights are reloaded on every call)
    model = YOLO(model_name)
    
    # Run inference
    results = model(image)
    
    # Start with an all-zero (all-background) mask; detected objects set pixels to 1
    mask = np.zeros((image.size[1], image.size[0]), dtype=np.uint8)
    
    # Process each detected object
    for result in results:
        if result.masks is not None:
            for single_mask in result.masks:
                # Convert mask to numpy and resize if needed
                mask_array = single_mask.data.cpu().numpy().squeeze()
                mask_array = (mask_array > 0.5).astype(np.uint8)
                
                # Resize if needed
                if mask_array.shape != mask.shape:
                    mask_array = np.array(
                        Image.fromarray(mask_array).resize(
                            (image.size[0], image.size[1]), 
                            Image.NEAREST
                        )
                    )
                
                # Add this mask to the overall mask (OR operation)
                mask = np.maximum(mask, mask_array)
    
    return mask

def apply_gaussian_blur(image, sigma=15):
    """Apply Gaussian blur to the background."""
    # Convert image to numpy array
    image_array = np.array(image)
    
    # Get segmentation mask (1 = foreground, 0 = background); if nothing is
    # detected, the mask stays empty and the whole image ends up blurred
    foreground_mask = segment_image(image)

    # Blur each channel in float32 to avoid uint8 rounding inside the filter
    blurred = np.zeros_like(image_array, dtype=np.float32)
    for channel in range(3):
        blurred[:, :, channel] = gaussian_filter(
            image_array[:, :, channel].astype(np.float32), sigma=sigma
        )
    
    # Combine original and blurred images based on mask
    mask_3d = np.stack([foreground_mask] * 3, axis=2)
    result = image_array * mask_3d + blurred * (1 - mask_3d)
    
    return Image.fromarray(result.astype(np.uint8))

def estimate_depth(image, model_name="depth-anything/Depth-Anything-V2-Small-hf"):
    """Estimate depth of the image."""
    depth_estimator = pipeline(
        task="depth-estimation",
        model=model_name,
        device=0 if torch.cuda.is_available() else -1,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    )
    
    depth_output = depth_estimator(image)
    depth_map = np.array(depth_output["depth"])
    
    # Normalize to 0-1 (Depth Anything predicts larger values for nearer surfaces);
    # the epsilon guards against division by zero on a completely flat depth map
    depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min() + 1e-8)
    
    return depth_map

def apply_depth_aware_blur(image, max_sigma=15, min_sigma=0):
    """Apply depth-aware blur with farther objects more blurred."""
    # Estimate normalized depth (1 = nearest, 0 = farthest)
    depth_map = estimate_depth(image)

    # Invert so that larger values mean farther away (1 = farthest, 0 = nearest)
    depth_map = 1 - depth_map
    
    image_array = np.array(image)
    
    # Create single blurred version at max sigma
    max_blurred = np.zeros_like(image_array, dtype=np.float32)
    for channel in range(3):
        max_blurred[:, :, channel] = gaussian_filter(
            image_array[:, :, channel].astype(np.float32),
            sigma=max_sigma
        )
    
    # Create 3-channel depth map for blending
    depth_3d = np.stack([depth_map] * 3, axis=2)
    
    # Blend: More depth (farther) = more blur
    result = image_array * (1 - depth_3d) + max_blurred * depth_3d
    
    return Image.fromarray(result.astype(np.uint8))
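
# Alternative sketch (not wired into the UI): a closer approximation of lens blur
# quantizes the depth map into a few bands and blurs each band with its own sigma,
# instead of blending the sharp image with a single max-sigma blur as above. The
# function name and the number of bands are illustrative choices.
def apply_layered_depth_blur(image, max_sigma=15, num_levels=4):
    """Blur the image in depth bands, with farther bands blurred more (illustrative)."""
    depth_map = 1 - estimate_depth(image)  # 1 = farthest, 0 = nearest
    image_array = np.array(image).astype(np.float32)
    result = image_array.copy()
    for level in range(1, num_levels):
        # Select pixels whose inverted depth falls in this band; band 0 stays sharp
        lo = level / num_levels
        hi = (level + 1) / num_levels if level < num_levels - 1 else 1.01
        band = (depth_map >= lo) & (depth_map < hi)
        if not band.any():
            continue
        sigma = max_sigma * level / (num_levels - 1)
        blurred = np.zeros_like(image_array)
        for channel in range(3):
            blurred[:, :, channel] = gaussian_filter(image_array[:, :, channel], sigma=sigma)
        result[band] = blurred[band]
    return Image.fromarray(result.astype(np.uint8))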

def process_image(image, blur_type, sigma=15):
    """Process image based on blur type."""
    # Preprocess image
    pil_image = preprocess_image(image)
    
    # Apply appropriate blur
    if blur_type == "Gaussian Background Blur":
        result = apply_gaussian_blur(pil_image, sigma)
    elif blur_type == "Depth-Aware Lens Blur":
        result = apply_depth_aware_blur(pil_image, max_sigma=sigma)
    else:
        result = pil_image
    
    return result

# Gradio Interface
def create_blur_app():
    with gr.Blocks() as demo:
        gr.Markdown("# Image Blur Effects")
        
        with gr.Row():
            input_image = gr.Image(label="Input Image", type="pil")
            output_image = gr.Image(label="Processed Image")
        
        with gr.Row():
            blur_type = gr.Dropdown(
                choices=[
                    "Gaussian Background Blur",
                    "Depth-Aware Lens Blur"
                ],
                value="Gaussian Background Blur",
                label="Blur Type"
            )
            sigma = gr.Slider(
                minimum=0, 
                maximum=30, 
                value=15, 
                label="Blur Intensity"
            )
        
        process_btn = gr.Button("Apply Blur Effect")
        
        process_btn.click(
            fn=process_image, 
            inputs=[input_image, blur_type, sigma], 
            outputs=output_image
        )
    
    return demo

# Launch the app
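# Note: demo.launch() serves the app locally (Gradio defaults to
# http://127.0.0.1:7860); pass share=True for a temporary public link.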
if __name__ == "__main__":
    demo = create_blur_app()
    demo.launch()