BlurMaster / app.py
import gradio as gr
import numpy as np
import torch
from PIL import Image
import cv2
import os  # Needed to read the token from the Space's environment secrets
from transformers import DPTImageProcessor, DPTForDepthEstimation
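
# Intel/dpt-hybrid-midas is a DPT (Dense Prediction Transformer) checkpoint
# for monocular depth estimation: it predicts a relative depth map from a
# single RGB image, which is what the blur bands below are derived from.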
# Access the token from the Space's secrets
HF_TOKEN = os.environ.get("HUGGING_FACE_TOKEN")
if HF_TOKEN is None:
    raise ValueError("Hugging Face token not found. Make sure you've added it as a secret to your Space.")
# Load the image processor and model
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas", token=HF_TOKEN)
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas", low_cpu_mem_usage=True, token=HF_TOKEN)
# Use the GPU when available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()
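
# Pipeline: run depth estimation, normalise the depth map to [0, 1], split it
# into foreground / midground / background bands using the threshold sliders,
# then apply a separate Gaussian blur to each band and composite the result.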
def apply_depth_aware_blur(image, foreground_blur, midground_blur, background_blur,
                           foreground_threshold, midground_lower, midground_upper, background_threshold):
    # Resize to a fixed working resolution and keep an RGB copy for compositing
    original_image = Image.fromarray(image).convert("RGB")
    original_image = original_image.resize((512, 512))

    # Run monocular depth estimation
    inputs = image_processor(images=original_image, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

    # Upsample the predicted depth to the working resolution
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=(512, 512),
        mode="bicubic",
        align_corners=False,
    )
    depth_map = prediction.squeeze().cpu().numpy()
    normalized_depth_map = (depth_map - np.min(depth_map)) / (np.max(depth_map) - np.min(depth_map))

    # Binary masks for the three depth bands
    foreground_mask = (normalized_depth_map < foreground_threshold).astype(np.uint8) * 255
    midground_mask = ((normalized_depth_map >= midground_lower) & (normalized_depth_map < midground_upper)).astype(np.uint8) * 255
    background_mask = (normalized_depth_map >= background_threshold).astype(np.uint8) * 255

    def to_odd(kernel_size):
        # cv2.GaussianBlur requires a positive, odd integer kernel size
        kernel_size = int(kernel_size)
        return kernel_size if kernel_size % 2 == 1 else kernel_size + 1

    # Blur each band separately and composite it back in using its mask
    blurred_image = np.copy(np.array(original_image))
    if foreground_blur > 0:
        k = to_odd(foreground_blur)
        blurred_image = np.where(foreground_mask[..., None] == 255, cv2.GaussianBlur(blurred_image, (k, k), 10), blurred_image)
    if midground_blur > 0:
        k = to_odd(midground_blur)
        blurred_image = np.where(midground_mask[..., None] == 255, cv2.GaussianBlur(blurred_image, (k, k), 8), blurred_image)
    if background_blur > 0:
        k = to_odd(background_blur)
        blurred_image = np.where(background_mask[..., None] == 255, cv2.GaussianBlur(blurred_image, (k, k), 20), blurred_image)

    return Image.fromarray(blurred_image.astype(np.uint8))
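
# Optional local smoke test (a hypothetical sketch, not part of the Space):
# "sample.jpg" is a placeholder path. Uncomment to run the blur function
# directly without launching the Gradio UI.
#
# test_image = np.array(Image.open("sample.jpg").convert("RGB"))
# result = apply_depth_aware_blur(test_image, 0, 0, 35, 0.2, 0.2, 0.6, 0.6)
# result.save("blurred_sample.jpg")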
iface = gr.Interface(
    fn=apply_depth_aware_blur,
    inputs=[
        gr.Image(label="Input Image"),
        gr.Slider(0, 51, step=2, value=0, label="Foreground Blur Kernel Size"),
        gr.Slider(0, 51, step=2, value=0, label="Midground Blur Kernel Size"),
        gr.Slider(0, 51, step=2, value=35, label="Background Blur Kernel Size"),
        gr.Slider(0, 1, value=0.2, label="Foreground Threshold"),
        gr.Slider(0, 1, value=0.2, label="Midground Lower Threshold"),
        gr.Slider(0, 1, value=0.6, label="Midground Upper Threshold"),
        gr.Slider(0, 1, value=0.6, label="Background Threshold"),
    ],
    outputs=gr.Image(label="Blurred Image"),
    title="Depth-Aware Lens Blur App",
    description="Apply depth-based blur to uploaded images. Adjust blur intensity for foreground, midground, and background.",
)
if __name__ == "__main__":
    iface.launch()