import json
import gradio as gr
from PIL import Image
import safetensors.torch
import spaces
import timm
from timm.models import VisionTransformer
import torch
from torchvision.transforms import transforms
from torchvision.transforms import InterpolationMode
import torchvision.transforms.functional as TF
from huggingface_hub import hf_hub_download
import numpy as np
from matplotlib import colormaps as cm  # ColormapRegistry; cm.get_cmap(...) needs matplotlib >= 3.7
class Fit(torch.nn.Module):
    def __init__(
        self,
        bounds: tuple[int, int] | int,
        interpolation=InterpolationMode.LANCZOS,
        grow: bool = True,
        pad: float | None = None
    ):
        super().__init__()
        self.bounds = (bounds, bounds) if isinstance(bounds, int) else bounds
        self.interpolation = interpolation
        self.grow = grow
        self.pad = pad

    def forward(self, img: Image.Image) -> Image.Image:
        wimg, himg = img.size
        hbound, wbound = self.bounds

        hscale = hbound / himg
        wscale = wbound / wimg

        if not self.grow:
            hscale = min(hscale, 1.0)
            wscale = min(wscale, 1.0)

        scale = min(hscale, wscale)
        if scale == 1.0:
            return img

        hnew = min(round(himg * scale), hbound)
        wnew = min(round(wimg * scale), wbound)

        img = TF.resize(img, (hnew, wnew), self.interpolation)

        if self.pad is None:
            return img

        hpad = hbound - hnew
        wpad = wbound - wnew

        tpad = hpad // 2
        bpad = hpad - tpad

        lpad = wpad // 2
        rpad = wpad - lpad

        return TF.pad(img, (lpad, tpad, rpad, bpad), self.pad)

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(" +
            f"bounds={self.bounds}, " +
            f"interpolation={self.interpolation.value}, " +
            f"grow={self.grow}, " +
            f"pad={self.pad})"
        )
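# Illustrative note (not in the original): Fit scales so the tighter-fitting
# side meets the bounds while preserving aspect ratio. E.g. for a 1024x768
# input and bounds (384, 384): scale = min(384/768, 384/1024) = 0.375, giving
# a 384x288 (w x h) result. With pad=None (as used below) no letterboxing is
# added here; the CenterCrop at the end of the pipeline squares the image.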
class CompositeAlpha(torch.nn.Module):
    def __init__(
        self,
        background: tuple[float, float, float] | float,
    ):
        super().__init__()
        self.background = (background, background, background) if isinstance(background, float) else background
        self.background = torch.tensor(self.background).unsqueeze(1).unsqueeze(2)

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        if img.shape[-3] == 3:
            return img  # already RGB; nothing to composite

        # Premultiply the color channels by alpha, then add the weighted background.
        alpha = img[..., 3, None, :, :]
        img[..., :3, :, :] *= alpha

        background = self.background.expand(-1, img.shape[-2], img.shape[-1])
        if background.ndim == 1:
            background = background[:, None, None]
        elif background.ndim == 2:
            background = background[None, :, :]

        img[..., :3, :, :] += (1.0 - alpha) * background
        return img[..., :3, :, :]

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(" +
            f"background={self.background})"
        )
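# Illustrative note (not in the original): this is standard "over" compositing
# onto a constant background, done per pixel in [0, 1] tensor space:
#   rgb_out = alpha * rgb_in + (1 - alpha) * background
# With CompositeAlpha(0.5) below, fully transparent regions become mid-gray,
# which maps to 0 after Normalize(mean=0.5, std=0.5).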
transform = transforms.Compose([
    Fit((384, 384)),
    transforms.ToTensor(),
    CompositeAlpha(0.5),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
    transforms.CenterCrop((384, 384)),
])
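# Pipeline summary (illustrative, not in the original): for any RGBA input,
# transform(img) yields a (3, 384, 384) float tensor in [-1, 1]. Fit can leave
# one side short of 384, and CenterCrop((384, 384)) zero-pads it back; zero in
# normalized space corresponds to 0.5 gray, matching CompositeAlpha's
# background. E.g.:
#   example = Image.new("RGBA", (512, 256), (255, 0, 0, 128))
#   assert transform(example).shape == (3, 384, 384)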
model = timm.create_model(
    "vit_so400m_patch14_siglip_384.webli",
    pretrained=False,
    num_classes=9083,
)  # type: VisionTransformer
class GatedHead(torch.nn.Module):
    def __init__(
        self,
        num_features: int,
        num_classes: int
    ):
        super().__init__()
        self.num_classes = num_classes
        self.linear = torch.nn.Linear(num_features, num_classes * 2)
        self.act = torch.nn.Sigmoid()
        self.gate = torch.nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear(x)
        x = self.act(x[:, :self.num_classes]) * self.gate(x[:, self.num_classes:])
        return x
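# Illustrative note (not in the original): the head emits 2 * num_classes
# logits; the first half are tag predictions, the second half act as learned
# per-tag gates, and both pass through a sigmoid before being multiplied:
#   score_i = sigmoid(z_i) * sigmoid(z_{i + num_classes}), in (0, 1)
# For a batch of B pooled features of width F, linear maps (B, F) to
# (B, 2 * 9083) and forward returns (B, 9083).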
model.head = GatedHead(min(model.head.weight.shape), 9083)  # min() picks in_features from the (out, in) weight shape

cached_model = hf_hub_download(
    repo_id="RedRocket/JointTaggerProject",
    subfolder="JTP_PILOT2",
    filename="JTP_PILOT2-e3-vit_so400m_patch14_siglip_384.safetensors"
)

safetensors.torch.load_model(model, cached_model)
model.eval()

with open("tagger_tags.json", "r") as file:
    tags = json.load(file)  # type: dict

allowed_tags = list(tags.keys())

for idx, tag in enumerate(allowed_tags):
    allowed_tags[idx] = tag.replace("_", " ")
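# Note (illustrative, not in the original): output index i of the model
# corresponds to allowed_tags[i]; the underscore rewrite only changes display
# text, e.g. "red_fox" -> "red fox".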
@spaces.GPU(duration=5)
def run_classifier(image: Image.Image, threshold):
    img = image.convert('RGBA')
    tensor = transform(img).unsqueeze(0)

    with torch.no_grad():
        probits = model(tensor)[0]  # type: torch.Tensor
        values, indices = probits.topk(250)

    tag_score = dict()
    for i in range(indices.size(0)):
        tag_score[allowed_tags[indices[i]]] = values[i].item()
    sorted_tag_score = dict(sorted(tag_score.items(), key=lambda item: item[1], reverse=True))

    return *create_tags(threshold, sorted_tag_score), img, sorted_tag_score
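# Note (illustrative, not in the original): the starred return expands
# create_tags' (text, filtered scores) pair, so run_classifier yields four
# values matching the upload handler's outputs below: tag_string, label_box,
# original_image_state, sorted_tag_score_state.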
def create_tags(threshold, sorted_tag_score: dict):
    filtered_tag_score = {key: value for key, value in sorted_tag_score.items() if value > threshold}
    text_no_impl = ", ".join(filtered_tag_score.keys())
    return text_no_impl, filtered_tag_score

def clear_image():
    return "", {}, None, {}, None
def cam_inference(img, threshold, alpha, evt: gr.SelectData):
    target_tag = evt.value
    tensor = transform(img).unsqueeze(0)

    gradients = {}
    activations = {}

    def hook_forward(module, input, output):
        activations['value'] = output

    def hook_backward(module, grad_in, grad_out):
        gradients['value'] = grad_out[0]

    target_tag_index = allowed_tags.index(target_tag)
    handle_forward = model.norm.register_forward_hook(hook_forward)
    handle_backward = model.norm.register_full_backward_hook(hook_backward)

    probits = model(tensor)[0].cpu()

    model.zero_grad()
    probits[target_tag_index].backward(retain_graph=True)

    # Patch-token gradients and activations captured at model.norm.
    patch_grads = gradients.get('value')
    patch_acts = activations.get('value')

    # Channel weights: gradients averaged over all patch tokens.
    weights = torch.mean(patch_grads, dim=1).squeeze(0)

    # Weighted sum over embedding channels, ReLU, reshape to the patch grid.
    cam_1d = torch.einsum('pe,e->p', patch_acts.squeeze(0), weights)
    cam_1d = torch.relu(cam_1d)
    cam = cam_1d.reshape(27, 27).detach().cpu().numpy()

    handle_forward.remove()
    handle_backward.remove()

    return create_cam_visualization_pil(img, cam, alpha=alpha, vis_threshold=threshold), cam
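# Illustrative summary (not in the original): this is a Grad-CAM-style map
# taken at model.norm, whose output for this ViT is (1, 729, 1152) patch
# tokens: a 27x27 grid at 384 px with patch size 14, with 1152 being the
# so400m embedding width. Per-channel weights are the gradients averaged over
# patches, and the map is the ReLU of the weighted sum over channels:
#   cam_p = relu(sum_e mean_p'(dL/dA_p'e) * A_pe)
# reshaped to 27x27 for upsampling over the source image.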
def create_cam_visualization_pil(image_pil, cam, alpha=0.6, vis_threshold=0.2):
    """
    Overlays CAM on image and returns a PIL image.

    Args:
        image_pil: PIL Image (RGBA)
        cam: 2D numpy array (activation map)
        alpha: float, blending factor
        vis_threshold: float, minimum normalized CAM value to show color

    Returns:
        PIL.Image.Image with overlay
    """
    if cam is None:  # guard: the sliders can fire before any tag has been selected
        return image_pil

    w, h = image_pil.size

    # Resize CAM to match image
    cam_resized = np.array(Image.fromarray(cam).resize((w, h), resample=Image.Resampling.BILINEAR))

    # Normalize CAM to [0, 1]
    cam_norm = (cam_resized - cam_resized.min()) / (np.ptp(cam_resized) + 1e-8)

    # Create heatmap using matplotlib colormap
    colormap = cm.get_cmap('jet')
    cam_colored = colormap(cam_norm)[:, :, :3]  # RGB

    cam_alpha = (cam_norm >= vis_threshold).astype(np.float32) * alpha  # Alpha mask
    cam_rgba = np.dstack((cam_colored, cam_alpha))  # Shape: (H, W, 4)
    cam_image = Image.fromarray((cam_rgba * 255).astype(np.uint8), mode="RGBA")

    # Composite over original
    composite = Image.alpha_composite(image_pil, cam_image)
    return composite
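# Illustrative note (not in the original): the overlay colors each pixel with
# the 'jet' colormap of the min-max-normalized CAM and makes it visible only
# where the normalized value clears vis_threshold; alpha_composite then blends
# heatmap over image as out = a_fg * fg + (1 - a_fg) * bg, which is why the
# base image must be RGBA.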
with gr.Blocks(css=".output-class { display: none; }") as demo:
    gr.Markdown("""
    ## Joint Tagger Project: JTP-PILOT² Demo **BETA**
    This tagger is designed for use on furry images (though it may very well work on out-of-distribution images, potentially with funny results). A threshold of 0.2 is recommended. Lower thresholds often turn up more valid tags, but can also result in some hallucinated tags.

    This tagger is the result of joint efforts between members of the RedRocket team, with distinctions given to Thessalo for creating the foundation for this project with his efforts, RedHotTensors for redesigning the process into a second-order method that models information expectation, and drhead for dataset preparation, creation of the training code, and supervision of the training runs.

    Thanks to metal63 for providing the initial code for attention visualization (click a tag in the tag list to try it out!).

    Special thanks to Minotoro at frosting.ai for providing the compute power for this project.
    """)
    original_image_state = gr.State()  # stash a copy of the input image
    sorted_tag_score_state = gr.State(value={})  # stash the full tag/score dict so re-thresholding doesn't re-run the model
    cam_state = gr.State()  # stash the raw CAM for re-rendering on slider changes
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(label="Source", sources=['upload'], type='pil', height=512, show_label=False)
            threshold_slider = gr.Slider(minimum=0.00, maximum=1.00, step=0.01, value=0.20, label="Tag Threshold")
            cam_slider = gr.Slider(minimum=0.00, maximum=1.00, step=0.01, value=0.20, label="CAM Threshold")
            alpha_slider = gr.Slider(minimum=0.00, maximum=1.00, step=0.01, value=0.60, label="CAM Alpha")
        with gr.Column():
            tag_string = gr.Textbox(label="Tag String")
            label_box = gr.Label(label="Tag Predictions", num_top_classes=250, show_label=False)

    image_input.upload(
        fn=run_classifier,
        inputs=[image_input, threshold_slider],
        outputs=[tag_string, label_box, original_image_state, sorted_tag_score_state]
    )

    image_input.clear(
        fn=clear_image,
        inputs=[],
        outputs=[tag_string, label_box, original_image_state, sorted_tag_score_state, cam_state]
    )

    threshold_slider.input(
        fn=create_tags,
        inputs=[threshold_slider, sorted_tag_score_state],
        outputs=[tag_string, label_box]
    )
    label_box.select(
        fn=cam_inference,
        inputs=[original_image_state, cam_slider, alpha_slider],
        outputs=[image_input, cam_state]  # cam_inference returns (overlay, cam); both outputs must be bound
    )
    cam_slider.input(
        fn=create_cam_visualization_pil,
        inputs=[original_image_state, cam_state, alpha_slider, cam_slider],
        outputs=[image_input]
    )

    alpha_slider.input(
        fn=create_cam_visualization_pil,
        inputs=[original_image_state, cam_state, alpha_slider, cam_slider],
        outputs=[image_input]
    )
if __name__ == "__main__":
    demo.launch()