import gradio as gr
import numpy as np
import cv2
from PIL import Image
from transformers import pipeline

# Load the depth estimation pipeline (Depth Anything V2, small variant)
pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")


def apply_depth_aware_blur_inverse(
    image,
    foreground_blur,
    midground_blur,
    background_blur,
    foreground_threshold,
    midground_lower,
    midground_upper,
    background_threshold,
):
    original_image = Image.fromarray(image).convert("RGB")
    original_image = original_image.resize((512, 512))
    image_np = np.array(original_image)

    # Run depth estimation; the pipeline returns a PIL depth map under "depth"
    depth = pipe(original_image)["depth"]
    depth = np.array(depth)
    depth = cv2.resize(depth, (512, 512), interpolation=cv2.INTER_CUBIC)

    # Normalize the depth map to [0, 1]; guard against a constant depth map
    depth = depth.astype(np.float32)
    depth_min, depth_max = depth.min(), depth.max()
    normalized_depth_map = (depth - depth_min) / max(depth_max - depth_min, 1e-6)

    # Create masks (inverted logic: higher values = closer).
    # NOTE: midground_lower and midground_upper are accepted from the UI but not
    # used here; the midground band is simply everything between the foreground
    # and background thresholds.
    foreground_mask = (normalized_depth_map >= foreground_threshold).astype(np.uint8) * 255
    midground_mask = (
        (normalized_depth_map < foreground_threshold)
        & (normalized_depth_map >= background_threshold)
    ).astype(np.uint8) * 255
    background_mask = (normalized_depth_map < background_threshold).astype(np.uint8) * 255

    blurred_image = image_np.copy()

    # Apply blur per region; kernel sizes must be positive and odd
    if foreground_blur > 0 and foreground_blur % 2 == 1:
        blurred_image = np.where(
            foreground_mask[..., None] == 255,
            cv2.GaussianBlur(blurred_image, (foreground_blur, foreground_blur), 10),
            blurred_image,
        )
    if midground_blur > 0 and midground_blur % 2 == 1:
        blurred_image = np.where(
            midground_mask[..., None] == 255,
            cv2.GaussianBlur(blurred_image, (midground_blur, midground_blur), 8),
            blurred_image,
        )
    if background_blur > 0 and background_blur % 2 == 1:
        blurred_image = np.where(
            background_mask[..., None] == 255,
            cv2.GaussianBlur(blurred_image, (background_blur, background_blur), 20),
            blurred_image,
        )

    return Image.fromarray(blurred_image.astype(np.uint8))


# Example input values
example_image = np.zeros((512, 512, 3), dtype=np.uint8)  # Placeholder for an image
example_inputs = [
    example_image,
    35,   # foreground_blur
    7,    # midground_blur
    15,   # background_blur
    0.6,  # foreground_threshold
    0.6,  # midground_lower
    0.2,  # midground_upper
    0.2,  # background_threshold
]

iface = gr.Interface(
    fn=apply_depth_aware_blur_inverse,
    inputs=[
        gr.Image(label="Input Image"),
        gr.Slider(1, 51, step=2, label="Foreground Blur Kernel Size"),
        gr.Slider(1, 51, step=2, label="Midground Blur Kernel Size"),
        gr.Slider(1, 51, step=2, label="Background Blur Kernel Size"),
        gr.Slider(0, 1, label="Foreground Threshold"),
        gr.Slider(0, 1, label="Midground Lower Threshold"),
        gr.Slider(0, 1, label="Midground Upper Threshold"),
        gr.Slider(0, 1, label="Background Threshold"),
    ],
    outputs=gr.Image(label="Blurred Image"),
    title="Inverse Depth-Aware Lens Blur App",
    description=(
        "Apply inverse depth-based blur to uploaded images using Depth Anything V2. "
        "Closer objects are blurred, farther objects stay sharper."
    ),
    examples=[example_inputs],
)

if __name__ == "__main__":
    iface.launch()
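
# --- Usage note ---
# A minimal sketch of calling the blur function directly, without the Gradio UI,
# e.g. for quick local testing. "example.jpg" is a hypothetical input path.
#
#   img = np.array(Image.open("example.jpg").convert("RGB"))
#   result = apply_depth_aware_blur_inverse(img, 35, 7, 15, 0.6, 0.6, 0.2, 0.2)
#   result.save("blurred_example.jpg")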