import json

import gradio as gr
from PIL import Image
import safetensors.torch
import spaces
import timm
from timm.models import VisionTransformer
import torch
from torchvision.transforms import transforms
from torchvision.transforms import InterpolationMode
import torchvision.transforms.functional as TF
from huggingface_hub import hf_hub_download
import numpy as np
import matplotlib.cm as cm


class Fit(torch.nn.Module):
    """Resize an image to fit within `bounds` while preserving aspect ratio, optionally padding to the full bounds."""

    def __init__(
        self,
        bounds: tuple[int, int] | int,
        interpolation=InterpolationMode.LANCZOS,
        grow: bool = True,
        pad: float | None = None,
    ):
        super().__init__()
        self.bounds = (bounds, bounds) if isinstance(bounds, int) else bounds
        self.interpolation = interpolation
        self.grow = grow
        self.pad = pad

    def forward(self, img: Image.Image) -> Image.Image:
        wimg, himg = img.size
        hbound, wbound = self.bounds

        hscale = hbound / himg
        wscale = wbound / wimg

        if not self.grow:
            # Never upscale; only shrink images that exceed the bounds.
            hscale = min(hscale, 1.0)
            wscale = min(wscale, 1.0)

        scale = min(hscale, wscale)
        if scale == 1.0:
            return img

        hnew = min(round(himg * scale), hbound)
        wnew = min(round(wimg * scale), wbound)

        img = TF.resize(img, (hnew, wnew), self.interpolation)

        if self.pad is None:
            return img

        # Center the resized image within the bounds, padding with the fill value.
        hpad = hbound - hnew
        wpad = wbound - wnew

        tpad = hpad // 2
        bpad = hpad - tpad
        lpad = wpad // 2
        rpad = wpad - lpad

        return TF.pad(img, (lpad, tpad, rpad, bpad), self.pad)

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(" +
            f"bounds={self.bounds}, " +
            f"interpolation={self.interpolation.value}, " +
            f"grow={self.grow}, " +
            f"pad={self.pad})"
        )


class CompositeAlpha(torch.nn.Module):
    """Composite an RGBA tensor over a solid background color, returning RGB."""

    def __init__(
        self,
        background: tuple[float, float, float] | float,
    ):
        super().__init__()
        self.background = (background, background, background) if isinstance(background, float) else background
        self.background = torch.tensor(self.background).unsqueeze(1).unsqueeze(2)

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        if img.shape[-3] == 3:
            return img  # already RGB; nothing to composite

        alpha = img[..., 3, None, :, :]

        img[..., :3, :, :] *= alpha

        background = self.background.expand(-1, img.shape[-2], img.shape[-1])
        if background.ndim == 1:
            background = background[:, None, None]
        elif background.ndim == 2:
            background = background[None, :, :]

        img[..., :3, :, :] += (1.0 - alpha) * background
        return img[..., :3, :, :]

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(" +
            f"background={self.background})"
        )


transform = transforms.Compose([
    Fit((384, 384)),
    transforms.ToTensor(),
    CompositeAlpha(0.5),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
    transforms.CenterCrop((384, 384)),
])

model = timm.create_model(
    "vit_so400m_patch14_siglip_384.webli",
    pretrained=False,
    num_classes=9083,
)  # type: VisionTransformer


class GatedHead(torch.nn.Module):
    """Classification head that gates each sigmoid prediction with a second learned sigmoid."""

    def __init__(
        self,
        num_features: int,
        num_classes: int,
    ):
        super().__init__()
        self.num_classes = num_classes
        self.linear = torch.nn.Linear(num_features, num_classes * 2)
        self.act = torch.nn.Sigmoid()
        self.gate = torch.nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear(x)
        x = self.act(x[:, :self.num_classes]) * self.gate(x[:, self.num_classes:])
        return x


model.head = GatedHead(min(model.head.weight.shape), 9083)

cached_model = hf_hub_download(
    repo_id="RedRocket/JointTaggerProject",
    subfolder="JTP_PILOT2",
    filename="JTP_PILOT2-e3-vit_so400m_patch14_siglip_384.safetensors",
)

safetensors.torch.load_model(model, cached_model)
model.eval()

with open("tagger_tags.json", "r") as file:
    tags = json.load(file)  # type: dict

allowed_tags = list(tags.keys())

# Tag names are stored with underscores; display them with spaces.
for idx, tag in enumerate(allowed_tags):
    allowed_tags[idx] = tag.replace("_", " ")

sorted_tag_score = {}
input_image = None


@spaces.GPU(duration=5)
def run_classifier(image, threshold):
    global sorted_tag_score, input_image
    # Keep the RGBA original around for the CAM overlay.
    input_image = image.convert('RGBA')
    img = input_image
    tensor = transform(img).unsqueeze(0)

    with torch.no_grad():
        probits = model(tensor)[0]
        values, indices = probits.topk(250)

    tag_score = dict()
    for i in range(indices.size(0)):
        tag_score[allowed_tags[indices[i]]] = values[i].item()
    sorted_tag_score = dict(sorted(tag_score.items(), key=lambda item: item[1], reverse=True))

    return create_tags(threshold)


def create_tags(threshold):
    global sorted_tag_score
    filtered_tag_score = {key: value for key, value in sorted_tag_score.items() if value > threshold}
    text_no_impl = ", ".join(filtered_tag_score.keys())
    return text_no_impl, filtered_tag_score


def clear_image():
    global sorted_tag_score, input_image
    input_image = None
    sorted_tag_score = {}
    return "", {}


target_tag_index = None

# Storage for the Grad-CAM hooks' intermediate values.
gradients = {}
activations = {}


def hook_forward(module, input, output):
    activations['value'] = output


def hook_backward(module, grad_in, grad_out):
    gradients['value'] = grad_out[0]


def cam_inference(threshold, evt: gr.SelectData):
    target_tag = evt.value
    print(f"target_tag: {target_tag}")
    global input_image, sorted_tag_score, target_tag_index, gradients, activations
    img = input_image
    tensor = transform(img).unsqueeze(0)

    gradients = {}
    activations = {}
    cam = None
    target_tag_index = None

    if target_tag:
        if target_tag not in allowed_tags:
            print(f"Warning: Target tag '{target_tag}' not found in allowed tags.")
            target_tag = None
        else:
            target_tag_index = allowed_tags.index(target_tag)

    # Hook the final norm layer to capture patch activations and their gradients.
    handle_forward = model.norm.register_forward_hook(hook_forward)
    handle_backward = model.norm.register_full_backward_hook(hook_backward)

    probits = model(tensor)[0].cpu()

    if target_tag is not None and target_tag_index is not None:
        model.zero_grad()
        target_score = probits[target_tag_index]
        target_score.backward(retain_graph=True)

        grads = gradients.get('value')
        acts = activations.get('value')

        if grads is not None and acts is not None:
            patch_grads = grads
            patch_acts = acts

            # Grad-CAM: weight each patch activation by its channel's mean gradient.
            weights = torch.mean(patch_grads, dim=1).squeeze(0)
            cam_1d = torch.einsum('pe,e->p', patch_acts.squeeze(0), weights)
            cam_1d = torch.relu(cam_1d)

            # This SigLIP ViT produces a 27x27 grid of patch tokens at 384px.
            cam = cam_1d.reshape(27, 27).detach().cpu().numpy()

    handle_forward.remove()
    handle_backward.remove()
    gradients = {}
    activations = {}

    return create_cam_visualization_pil(cam, vis_threshold=threshold)


def create_cam_visualization_pil(cam, alpha=0.6, vis_threshold=0.2):
    """
    Overlays the CAM on the current input image and returns a PIL image.
    Uses the global `input_image` as the base image.

    Args:
        cam: 2D numpy array (activation map), or None for no overlay
        alpha: float, blending factor
        vis_threshold: float, minimum normalized CAM value to show color

    Returns:
        PIL.Image.Image with overlay
    """
    global input_image
    image_pil = input_image  # stored as RGBA, as alpha_composite requires

    if cam is None:
        return image_pil

    w, h = image_pil.size

    # Resize CAM to match image
    cam_resized = np.array(Image.fromarray(cam).resize((w, h), resample=Image.Resampling.BILINEAR))

    # Normalize CAM to [0, 1]
    cam_norm = (cam_resized - cam_resized.min()) / (np.ptp(cam_resized) + 1e-8)

    # Create heatmap using matplotlib colormap
    colormap = cm.get_cmap('jet')
    cam_colored = colormap(cam_norm)[:, :, :3]  # RGB

    # Show color only where the normalized CAM exceeds the visibility threshold.
    cam_alpha = (cam_norm >= vis_threshold).astype(np.float32) * alpha  # alpha mask
    cam_rgba = np.dstack((cam_colored, cam_alpha))  # shape: (H, W, 4)

    cam_image = Image.fromarray((cam_rgba * 255).astype(np.uint8), mode="RGBA")

    # Composite over original
    composite = Image.alpha_composite(image_pil, cam_image)
    return composite


with gr.Blocks(css=".output-class { display: none; }") as demo:
    gr.Markdown("""
    ## Joint Tagger Project: JTP-PILOT² Demo **BETA**
    This tagger is designed for use on furry images (though it may well work on out-of-distribution images, potentially with funny results). A threshold of 0.2 is recommended. Lower thresholds often turn up more valid tags, but can also result in some hallucinated tags.

    This tagger is the result of joint efforts between members of the RedRocket team, with distinctions given to Thessalo for creating the foundation for this project with his efforts, RedHotTensors for redesigning the process into a second-order method that models information expectation, and drhead for dataset prep, creation of training code, and supervision of training runs. Special thanks to Minotoro at frosting.ai for providing the compute power for this project.
    """)

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(label="Source", sources=['upload'], type='pil', height=512, show_label=False)
            threshold_slider = gr.Slider(minimum=0.00, maximum=1.00, step=0.01, value=0.20, label="Threshold")
        with gr.Column():
            tag_string = gr.Textbox(label="Tag String")
            label_box = gr.Label(label="Tag Predictions", num_top_classes=250, show_label=False)

    image_input.upload(
        fn=run_classifier,
        inputs=[image_input, threshold_slider],
        outputs=[tag_string, label_box]
    )

    image_input.clear(
        fn=clear_image,
        inputs=[],
        outputs=[tag_string, label_box]
    )

    threshold_slider.input(
        fn=create_tags,
        inputs=[threshold_slider],
        outputs=[tag_string, label_box]
    )

    label_box.select(
        fn=cam_inference,
        inputs=[threshold_slider],
        outputs=[image_input]
    )

if __name__ == "__main__":
    demo.launch()