Update app.py
app.py
CHANGED
@@ -1,229 +1,78 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
-
-@author: perghect
 """
 import gradio as gr
-import requests
-import io
-import torch
-import numpy as np
 from PIL import Image, ImageFilter
-from torchvision import transforms
-from transformers import AutoModelForImageSegmentation, AutoImageProcessor, AutoModelForDepthEstimation
-
-# Set device and precision
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-torch.set_float32_matmul_precision('high')
-
-# Load models at startup
-rmbg_model = AutoModelForImageSegmentation.from_pretrained("briaai/RMBG-2.0", trust_remote_code=True).to(device).eval()
-depth_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
-depth_model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf").to(device)
-
-def load_image_from_link(url: str) -> Image.Image:
-    """Downloads an image from a URL and returns a Pillow Image."""
-    response = requests.get(url)
-    response.raise_for_status()
-    image = Image.open(io.BytesIO(response.content)).convert("RGB")
-    return image
-
-# Gaussian Blur Functions
-def run_rmbg(image: Image.Image, threshold=0.5):
-    """Runs the RMBG-2.0 model on the image and returns a binary mask."""
-    try:
-        image_size = (1024, 1024)
-        transform_image = transforms.Compose([
-            transforms.Resize(image_size),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-        ])
-
-        input_images = transform_image(image).unsqueeze(0).to(device)
-
-        with torch.no_grad():
-            preds = rmbg_model(input_images)
-            if isinstance(preds, list):
-                mask_logits = preds[-1]
-            else:
-                raise ValueError(f"Unexpected output format: {type(preds)}")
-
-        mask_prob = mask_logits.sigmoid().cpu()[0].squeeze()
-        pred_pil = transforms.ToPILImage()(mask_prob)
-        mask_pil = pred_pil.resize(image.size, resample=Image.BILINEAR)
-
-        mask_np = np.array(mask_pil, dtype=np.uint8) / 255.0
-        binary_mask = (mask_np > threshold).astype(np.uint8)
-        return binary_mask
-    except Exception as e:
-        raise Exception(f"Error in background removal: {str(e)}")
-
-def apply_background_blur(image: Image.Image, mask: np.ndarray, sigma: float = 15):
-    """Applies a Gaussian blur to the background while keeping the foreground sharp."""
-    image_np = np.array(image)
-    mask_np = mask.astype(np.uint8)
-
-    blurred_image = image.filter(ImageFilter.GaussianBlur(radius=sigma))
-    blurred_np = np.array(blurred_image)
-
-    output_np = np.where(mask_np[..., None] == 1, image_np, blurred_np)
-    output_image = Image.fromarray(output_np.astype(np.uint8))
-    return output_image
-
-# Lens Blur Functions
-def run_depth_estimation(image: Image.Image, target_size=(512, 512)):
-    """Runs the Depth-Anything-V2-Small model and returns the depth map."""
-    try:
-        image_resized = image.resize(target_size, resample=Image.BILINEAR)
-        inputs = depth_processor(images=image_resized, return_tensors="pt").to(device)
-
-        with torch.no_grad():
-            outputs = depth_model(**inputs)
-            predicted_depth = outputs.predicted_depth
-
-        prediction = torch.nn.functional.interpolate(
-            predicted_depth.unsqueeze(1),
-            size=image.size[::-1],
-            mode="bicubic",
-            align_corners=False,
-        )
-
-        depth_map = prediction.squeeze().cpu().numpy()
-        depth_max = depth_map.max()
-        depth_min = depth_map.min()
-        if depth_max == depth_min:
-            depth_max = depth_min + 1e-6  # Avoid division by zero
-        depth_map = (depth_map - depth_min) / (depth_max - depth_min)
-        depth_map = 1 - depth_map  # Invert: higher values = farther
-        return depth_map
-    except Exception as e:
-        raise Exception(f"Error in depth estimation: {str(e)}")
-
-def apply_depth_based_blur(image: Image.Image, depth_map: np.ndarray, max_radius: float = 15, foreground_percentile: float = 30):
-    """Applies a variable Gaussian blur based on the depth map."""
-    image_np = np.array(image)
-
-    if depth_map.shape != image_np.shape[:2]:
-        depth_map = np.array(Image.fromarray(depth_map).resize(image.size, resample=Image.BILINEAR))
-
-    foreground_threshold = np.percentile(depth_map.flatten(), foreground_percentile)
-
-    output_np = np.zeros_like(image_np)
-    mask_foreground = (depth_map <= foreground_threshold)
-    output_np[mask_foreground] = image_np[mask_foreground]
-
-    depth_max = depth_map.max()
-    depth_range = depth_max - foreground_threshold
-    if depth_range == 0:
-        depth_range = 1e-6
-    normalized_depth = np.zeros_like(depth_map)
-    mask_above_foreground = (depth_map > foreground_threshold)
-    normalized_depth[mask_above_foreground] = (depth_map[mask_above_foreground] - foreground_threshold) / depth_range
-    normalized_depth = np.clip(normalized_depth, 0, 1)
-
-    depth_levels = np.linspace(0, 1, 20)
-    for i in range(len(depth_levels) - 1):
-        depth_min = depth_levels[i]
-        depth_max = depth_levels[i + 1]
-        mask = (normalized_depth >= depth_min) & (normalized_depth < depth_max) & (depth_map > foreground_threshold)
-        if not np.any(mask):
-            continue
-
-        avg_depth = (depth_min + depth_max) / 2
-        blur_radius = max_radius * avg_depth
-
-        blurred_image = image.filter(ImageFilter.GaussianBlur(radius=blur_radius))
-        blurred_np = np.array(blurred_image)
-        output_np[mask] = blurred_np[mask]
-
-    mask_farthest = (normalized_depth >= depth_levels[-1]) & (depth_map > foreground_threshold)
-    if np.any(mask_farthest):
-        blurred_max = image.filter(ImageFilter.GaussianBlur(radius=max_radius))
-        output_np[mask_farthest] = np.array(blurred_max)[mask_farthest]
-
-    output_image = Image.fromarray(output_np.astype(np.uint8))
-    return output_image
-
-# Main Processing Function for Gradio
-def process_image(image, blur_type, sigma=15, max_radius=15, foreground_percentile=30):
-    """Processes the image based on the selected blur type."""
-    if image is None:
-        return None, "Please upload an image."
-
-    try:
-        image = Image.fromarray(image).convert("RGB")
-    except Exception as e:
-        return None, f"Error processing image: {str(e)}"
-
-    # Resize image if too large
-    max_size = (1024, 1024)
-    if image.size[0] > max_size[0] or image.size[1] > max_size[1]:
-        image.thumbnail(max_size, Image.Resampling.LANCZOS)
-
-    try:
-        if blur_type == "Gaussian Blur":
-            mask = run_rmbg(image, threshold=0.5)
-            output_image = apply_background_blur(image, mask, sigma=sigma)
-            title = f"Gaussian Blur (sigma={sigma})"
-        else:  # Lens Blur
-            depth_map = run_depth_estimation(image, target_size=(512, 512))
-            output_image = apply_depth_based_blur(image, depth_map, max_radius=max_radius, foreground_percentile=foreground_percentile)
-            title = f"Lens Blur (max_radius={max_radius}, foreground_percentile={foreground_percentile})"
-    except Exception as e:
-        return None, f"Error applying blur: {str(e)}"
-
-    return output_image, title
-
-# Gradio Interface with Conditional Parameter Display
-with gr.Blocks() as demo:
-    gr.Markdown("# Image Blur Effects with Gaussian and Lens Blur")
-    gr.Markdown("""
-    This app applies blur effects to your images. Follow these steps to use it:
-    **Note**: This app is hosted on Hugging Face Spaces' free tier and may go to "Sleeping" mode after 48 hours of inactivity. If it doesn't load immediately, please wait a few seconds while it wakes up.
-    1. **Upload an Image**: Click the "Upload Image" box to upload an image from your device.
-    2. **Choose a Blur Type**:
-        - **Gaussian Blur**: Applies a uniform blur to the background, keeping the foreground sharp. Adjust the sigma parameter to control blur intensity.
-        - **Lens Blur**: Applies a depth-based blur, simulating a depth-of-field effect (closer objects are sharp, farther objects are blurred). Adjust the max radius and foreground percentile to fine-tune the effect.
-    3. **Adjust Parameters**:
-        - For Gaussian Blur, use the "Gaussian Blur Sigma" slider to control blur intensity (higher values = more blur).
-        - For Lens Blur, use the "Max Blur Radius" slider to control the maximum blur intensity and the "Foreground Percentile" slider to adjust the depth threshold for the foreground.
-    4. **Apply the Blur**: Click the "Apply Blur" button to process the image.
-    5. **View the Result**: The processed image will appear in the "Output Image" box, along with a description of the effect applied.
-    **Example**: Try uploading an image with a clear foreground and background (e.g., a person in front of a landscape) to see the effects in action.
-    """)
-
-    with gr.Row():
-        image_input = gr.Image(label="Upload Image", type="numpy")
-        with gr.Column():
-            blur_type = gr.Radio(choices=["Gaussian Blur", "Lens Blur"], label="Blur Type", value="Gaussian Blur")
-            sigma = gr.Slider(minimum=1, maximum=50, step=1, value=15, label="Gaussian Blur Sigma", visible=True)
-            max_radius = gr.Slider(minimum=1, maximum=50, step=1, value=15, label="Max Lens Blur Radius", visible=False)
-            foreground_percentile = gr.Slider(minimum=1, maximum=50, step=1, value=30, label="Foreground Percentile", visible=False)
-
-    # Update visibility of parameters based on blur type
-    def update_visibility(blur_type):
-        if blur_type == "Gaussian Blur":
-            return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
-        else:  # Lens Blur
-            return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
-
-    blur_type.change(
-        fn=update_visibility,
-        inputs=blur_type,
-        outputs=[sigma, max_radius, foreground_percentile]
-    )
-
-    process_button = gr.Button("Apply Blur")
-    with gr.Row():
-        output_image = gr.Image(label="Output Image")
-        output_text = gr.Textbox(label="Effect Applied")
-
-    process_button.click(
-        fn=process_image,
-        inputs=[image_input, blur_type, sigma, max_radius, foreground_percentile],
-        outputs=[output_image, output_text]
-    )

-#
-
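The removed version wires these helpers together through the Blocks UI; outside Gradio they could be exercised roughly as in the sketch below. This is illustrative only: the URL is a placeholder, and the model loading at the top of the removed file still has to run first.

```python
# Illustrative driver for the removed helpers above (not part of the app).
url = "https://example.com/portrait.jpg"                     # placeholder URL
img = load_image_from_link(url)                              # download and convert to RGB

fg_mask = run_rmbg(img, threshold=0.5)                       # binary foreground mask from RMBG-2.0
gaussian_result = apply_background_blur(img, fg_mask, sigma=15)

depth = run_depth_estimation(img, target_size=(512, 512))    # normalized, inverted depth map
lens_result = apply_depth_based_blur(img, depth, max_radius=15, foreground_percentile=30)

gaussian_result.save("gaussian_blur.png")
lens_result.save("lens_blur.png")
```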
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
+@author: Nikhil Kunjoor
 """
 import gradio as gr
+from transformers import pipeline
 from PIL import Image, ImageFilter
+import numpy as np
+import torch

+# Load models from Hugging Face
+segmentation_model = pipeline("image-segmentation", model="nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
+depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
+
+def apply_gaussian_blur(image, mask, sigma):
+    blurred = image.filter(ImageFilter.GaussianBlur(sigma))
+    return Image.composite(image, blurred, mask)
+
+def apply_lens_blur(image, depth_map, sigma):
+    depth_array = np.array(depth_map)
+    normalized_depth = (depth_array - np.min(depth_array)) / (np.max(depth_array) - np.min(depth_array))
+
+    blurred = image.copy()
+    for x in range(image.width):
+        for y in range(image.height):
+            blur_intensity = normalized_depth[y, x] * sigma
+            local_blur = image.crop((x-1, y-1, x+2, y+2)).filter(ImageFilter.GaussianBlur(blur_intensity))
+            blurred.putpixel((x, y), local_blur.getpixel((1, 1)))
+    return blurred
+
+def process_image(image, blur_type, sigma):
+    # Perform segmentation
+    segmentation_results = segmentation_model(image)
+    person_mask = None
+    for segment in segmentation_results:
+        if segment['label'] == 'person':
+            person_mask = segment['mask']  # the segmentation pipeline returns the mask as an L-mode PIL image
+            break
+
+    if person_mask is None:
+        person_mask = Image.new('L', image.size, 255)  # Create a white mask if no person is detected
+
+    # Perform depth estimation
+    depth_results = depth_estimator(image)
+    depth_map = depth_results["depth"]
+
+    # Normalize depth map for visualization
+    depth_array = np.array(depth_map)
+    normalized_depth = (depth_array - np.min(depth_array)) / (np.max(depth_array) - np.min(depth_array)) * 255
+    depth_visualization = Image.fromarray(normalized_depth.astype(np.uint8))
+
+    # Apply selected blur effect
+    if blur_type == "Gaussian Blur":
+        output_image = apply_gaussian_blur(image, person_mask, sigma)
+    else:  # Lens Blur
+        output_image = apply_lens_blur(image, depth_map, sigma)
+
+    return person_mask, depth_visualization, output_image
+
+# Create Gradio interface
+iface = gr.Interface(
+    fn=process_image,
+    inputs=[
+        gr.Image(type="pil", label="Upload Image"),
+        gr.Radio(["Gaussian Blur", "Lens Blur"], label="Blur Type", value="Gaussian Blur"),
+        gr.Slider(0, 50, step=1, label="Blur Intensity (Sigma)", value=15)
+    ],
+    outputs=[
+        gr.Image(type="pil", label="Segmentation Mask"),
+        gr.Image(type="pil", label="Depth Map"),
+        gr.Image(type="pil", label="Output Image")
+    ],
+    title="Vision Transformer Segmentation & Depth-Based Blur Effects",
+    description="Upload an image to apply segmentation and lens blur effects. Adjust the blur type and intensity using the controls below."
+)
+
+iface.launch()
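Two things stand out in the new `apply_lens_blur`: it issues a separate crop-and-GaussianBlur call for every pixel, which is very slow on anything larger than a small image, and it uses the normalized depth directly, whereas the removed version inverted the map so that farther pixels received more blur. A cheaper variant, closer in spirit to the removed `apply_depth_based_blur`, pre-blurs the whole image at a handful of radii and blends the copies by depth band. The sketch below is illustrative only; the function name and the `levels` and `invert_depth` parameters are assumptions, not part of the committed code.

```python
import numpy as np
from PIL import Image, ImageFilter

def layered_lens_blur(image, depth_map, sigma, levels=8, invert_depth=True):
    """Approximate depth-of-field with a few pre-blurred copies (sketch only).

    Instead of blurring per pixel, blur the whole image at `levels` radii and,
    for each pixel, keep the copy whose radius matches that pixel's depth band.
    `invert_depth=True` assumes the depth model outputs larger values for
    nearer pixels (as DPT-style models typically do), so far pixels blur more.
    """
    depth = np.asarray(depth_map, dtype=np.float32)
    depth = (depth - depth.min()) / max(float(depth.max() - depth.min()), 1e-6)
    if invert_depth:
        depth = 1.0 - depth  # larger value -> farther away -> stronger blur

    output = np.array(image)
    edges = np.linspace(0.0, 1.0, levels + 1)
    for i in range(levels):
        lo, hi = edges[i], edges[i + 1]
        if i == levels - 1:
            band = depth >= lo            # include the far end in the last band
        else:
            band = (depth >= lo) & (depth < hi)
        if not band.any():
            continue
        radius = sigma * (lo + hi) / 2.0  # blur strength for this depth band
        blurred = np.array(image.filter(ImageFilter.GaussianBlur(radius)))
        output[band] = blurred[band]
    return Image.fromarray(output)

# Hypothetical usage with the objects created inside process_image above:
# output_image = layered_lens_blur(image, depth_map, sigma)
```

With eight bands this performs eight full-image blurs instead of one tiny blur per pixel, which keeps the lens-blur path tractable at the 1024-pixel image sizes the app targets.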