# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
"""
Modified from
https://github.com/mseitzer/pytorch-fid
Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn.functional as F
from torch import nn

try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url
from torchvision.models import inception, inception_v3, vgg16

# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases' \
                  '/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'


class SwAV(nn.Module):
    """SwAV ResNet-50 feature extractor. The final fc layer is replaced by an
    identity so the forward pass returns the 2048-dim pooled features."""

    def __init__(self):
        super().__init__()
        self.model = torch.hub.load('facebookresearch/swav', 'resnet50',
                                    pretrained=True)
        self.model.fc = torch.nn.Sequential()

    def forward(self, x, align_corners=True):
        # Resize inputs to the 224x224 resolution expected by the backbone.
        y = self.model(F.interpolate(
            x, size=(224, 224), mode='bicubic', align_corners=align_corners))
        return y


class Vgg16(nn.Module):
    """VGG-16 feature extractor. Only the first four classifier modules are
    kept (up to and including the second fully connected layer), so the
    forward pass returns 4096-dim features."""

    def __init__(self):
        super().__init__()
        self.model = vgg16(pretrained=True, init_weights=False)
        self.model.classifier = torch.nn.Sequential(
            *[self.model.classifier[i] for i in range(4)]
        )

    def forward(self, x, align_corners=True):
        y = self.model(F.interpolate(
            x, size=(224, 224), mode='bicubic', align_corners=align_corners))
        return y


class InceptionV3(nn.Module):
    """Torchvision Inception-V3 feature extractor. The final fc layer is
    replaced by an identity so the forward pass returns 2048-dim pooled
    features."""

    def __init__(self):
        super().__init__()
        self.model = inception_v3(transform_input=False,
                                  pretrained=True,
                                  init_weights=False)
        self.model.fc = torch.nn.Sequential()

    def forward(self, x, align_corners=True):
        y = self.model(F.interpolate(
            x, size=(299, 299), mode='bicubic', align_corners=align_corners))
        return y


class TFInceptionV3(nn.Module):
    """Inception-V3 matching the original TensorFlow FID implementation. The
    Mixed_5/6/7 blocks are swapped for the patched FID variants below, the
    ported TensorFlow weights are loaded, and the final fc layer is replaced
    by an identity so the forward pass returns 2048-dim pooled features."""

    def __init__(self):
        super().__init__()
        self.model = inception_v3(transform_input=False,
                                  num_classes=1008,
                                  aux_logits=False,
                                  pretrained=False,
                                  init_weights=False)
        self.model.Mixed_5b = FIDInceptionA(192, pool_features=32)
        self.model.Mixed_5c = FIDInceptionA(256, pool_features=64)
        self.model.Mixed_5d = FIDInceptionA(288, pool_features=64)
        self.model.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
        self.model.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
        self.model.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
        self.model.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
        self.model.Mixed_7b = FIDInceptionE_1(1280)
        self.model.Mixed_7c = FIDInceptionE_2(2048)
        state_dict = load_state_dict_from_url(
            FID_WEIGHTS_URL, progress=True, map_location='cpu'
        )
        self.model.load_state_dict(state_dict)
        self.model.fc = torch.nn.Sequential()

    def forward(self, x, align_corners=True):
        y = self.model(F.interpolate(
            x, size=(299, 299), mode='bicubic', align_corners=align_corners))
        return y


class FIDInceptionA(inception.InceptionA):
    """InceptionA block patched for FID computation"""

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation.
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
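

# Illustrative note on the `count_include_pad=False` patch used in the FID
# blocks: each window average is divided only by the number of valid
# (non-padded) elements, which matches TensorFlow's 'SAME' average pooling.
# For a corner element of a 3x3 tensor of ones, the divisor is 4 instead of 9:
#
#     >>> x = torch.ones(1, 1, 3, 3)
#     >>> F.avg_pool2d(x, 3, 1, 1, count_include_pad=True)[0, 0, 0, 0]
#     tensor(0.4444)
#     >>> F.avg_pool2d(x, 3, 1, 1, count_include_pad=False)[0, 0, 0, 0]
#     tensor(1.)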


class FIDInceptionC(inception.InceptionC):
    """InceptionC block patched for FID computation"""

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation.
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_1(inception.InceptionE):
    """First InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation.
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                                   count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_2(inception.InceptionE):
    """Second InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
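

# Example usage (an illustrative sketch; the surrounding pipeline's image
# preprocessing and device handling are assumed, not shown): each wrapper
# takes a float image batch in NCHW format and returns a feature matrix.
if __name__ == '__main__':
    extractor = TFInceptionV3().eval()
    with torch.no_grad():
        images = torch.rand(2, 3, 256, 256)  # dummy batch for illustration
        features = extractor(images)         # (2, 2048) pooled activations
    print(features.shape)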