import gradio as gr
from transformers import AutoImageProcessor, SegformerForSemanticSegmentation, pipeline
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import torch

# ----- Load models once -----
seg_model_id = "nvidia/segformer-b0-finetuned-ade-512-512"
depth_model_id = "depth-anything/Depth-Anything-V2-Base-hf"

seg_processor = AutoImageProcessor.from_pretrained(seg_model_id)
seg_model = SegformerForSemanticSegmentation.from_pretrained(seg_model_id)

depth_pipe = pipeline(
    task="depth-estimation",
    model=depth_model_id,
    device=0 if torch.cuda.is_available() else -1,
)


# ----- Gaussian background blur using segmentation -----
def gaussian_background_blur(img: Image.Image) -> Image.Image:
    img = ImageOps.fit(img.convert("RGB"), (512, 512), method=Image.BICUBIC)

    inputs = seg_processor(images=img, return_tensors="pt")
    with torch.no_grad():
        outputs = seg_model(**inputs)
    logits = outputs.logits

    # Upsample the low-resolution logits back to the image size before argmax.
    upsampled = torch.nn.functional.interpolate(
        logits, size=(512, 512), mode="bilinear", align_corners=False
    )
    seg = upsampled.argmax(dim=1)[0].cpu().numpy()

    # Binary mask of every ADE20K class whose label contains "person".
    id2label = seg_model.config.id2label
    person_ids = [i for i, label in id2label.items() if "person" in label.lower()]
    mask = np.isin(seg, person_ids).astype(np.uint8)
    mask_pil = Image.fromarray(mask * 255, mode="L")

    # Keep the person sharp; use the blurred copy everywhere else.
    blurred_bg = img.filter(ImageFilter.GaussianBlur(radius=15))
    out = Image.composite(img, blurred_bg, mask_pil)
    return out


# ----- Depth-based lens blur -----
def depth_lens_blur(img: Image.Image) -> Image.Image:
    img = ImageOps.fit(img.convert("RGB"), (512, 512), method=Image.BICUBIC)

    depth_output = depth_pipe(img)
    depth_tensor = depth_output["predicted_depth"]

    # The raw depth map is returned at the model's own inference resolution,
    # so resize it to 512x512 before using it as a per-pixel blur map.
    if depth_tensor.dim() == 2:
        depth_tensor = depth_tensor.unsqueeze(0)
    depth_tensor = torch.nn.functional.interpolate(
        depth_tensor.unsqueeze(1), size=(512, 512), mode="bicubic", align_corners=False
    )
    depth_np = depth_tensor.squeeze().cpu().numpy()

    # Normalize to [0, 1], then invert so near pixels stay sharp and far
    # pixels get the most blur.
    d_min, d_max = depth_np.min(), depth_np.max()
    depth_norm = (depth_np - d_min) / (d_max - d_min + 1e-8)  # [0, 1]
    blur_norm = 1.0 - depth_norm  # near ≈ 0 blur, far ≈ 1 blur

    # Precompute a small stack of progressively blurrier copies of the image.
    max_radius = 15.0
    num_levels = 6
    radii = np.linspace(0, max_radius, num_levels)
    blurred_versions = [
        img.filter(ImageFilter.GaussianBlur(radius=float(r))) for r in radii
    ]
    blurred_np = [np.array(b) for b in blurred_versions]

    # Quantize the blur map into discrete levels and pick the matching copy.
    level_size = 1.0 / (num_levels - 1)
    blur_levels = np.floor(blur_norm / level_size).astype(np.int32)
    blur_levels = np.clip(blur_levels, 0, num_levels - 1)

    H, W = blur_levels.shape
    out_np = np.zeros((H, W, 3), dtype=np.uint8)
    for lvl in range(num_levels):
        mask = blur_levels == lvl
        if not np.any(mask):
            continue
        mask_3c = np.repeat(mask[:, :, None], 3, axis=2)
        out_np[mask_3c] = blurred_np[lvl][mask_3c]

    return Image.fromarray(out_np)


# ----- Gradio UI -----
def apply_effect(img, mode):
    if img is None:
        return None
    if mode == "Gaussian background blur":
        return gaussian_background_blur(img)
    elif mode == "Depth-based lens blur":
        return depth_lens_blur(img)
    return img


demo = gr.Interface(
    fn=apply_effect,
    inputs=[
        gr.Image(type="pil", label="Upload an image"),
        gr.Radio(
            ["Gaussian background blur", "Depth-based lens blur"],
            value="Gaussian background blur",
            label="Effect",
        ),
    ],
    outputs=gr.Image(label="Output"),
    title="Gaussian & Depth-based Lens Blur Demo",
    description="Upload a selfie or scene and choose Gaussian background blur or depth-based lens blur.",
)

if __name__ == "__main__":
    demo.launch()
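
# Note: this demo assumes the packages imported above are installed
# (e.g. `pip install gradio transformers torch pillow numpy`); the SegFormer
# and Depth Anything V2 checkpoints are downloaded from the Hugging Face Hub
# on first run, so the initial launch needs network access.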