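"""
Compute image embeddings for an LMDB dataset of cow image crops using a
Hugging Face vision model (e.g., ViT-MAE or CLIP) and save them to a .pt file.

Example invocation (the script filename here is only a placeholder):
    python embed_images.py --model_name facebook/vit-mae-base --batch_size 128
"""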
import argparse
import re
import time

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
from transformers import AutoProcessor, AutoModel

from data_loading import LMDBImageDataset

# Use the file_system sharing strategy to avoid "too many open files" errors
# when DataLoader workers exchange tensors.
torch.multiprocessing.set_sharing_strategy('file_system')

def infer_image_size(model_name):
    """
    Infer the input image size from the model name.

    Looks for a trailing hyphen followed by digits (e.g., "-336").
    Defaults to 224 if no such suffix is found.
    """
    match = re.search(r'-([0-9]+)$', model_name)
    if match:
        return int(match.group(1))
    return 224

def collate_fn(batch):
    # Keep PIL images in a plain Python list; the Hugging Face processor
    # batches and tensorizes them later.
    images, labels = zip(*batch)
    return list(images), list(labels)

def main():
    parser = argparse.ArgumentParser(description="Compute embeddings for a Hugging Face model")
    parser.add_argument('--model_name', type=str, default="facebook/vit-mae-base",
                        help="Hugging Face model name, e.g., facebook/vit-mae-base "
                             "or openai/clip-vit-large-patch14-336")
    parser.add_argument('--lmdb_path', type=str, default='../lmdb_all_crops_pmfeed_4_3_16',
                        help="Path to the LMDB image dataset")
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_workers', type=int, default=8)
    args = parser.parse_args()
    image_size = infer_image_size(args.model_name)
    print(f"Inferred image size: {image_size}")

    # Only resize here; tensor conversion happens in the processor.
    transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
    ])
    dataset = LMDBImageDataset(
        lmdb_path=args.lmdb_path,
        transform=transform,
        limit=None,
    )
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=collate_fn,
    )
    model_name = args.model_name
    # do_normalize=False skips the processor's mean/std normalization of pixel values.
    processor = AutoProcessor.from_pretrained(model_name, do_normalize=False)
    model = AutoModel.from_pretrained(model_name)
    print(f"Using model: {model_name}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    all_embeddings = []
    all_cow_ids = []

    with torch.no_grad():
        for images, cow_ids in tqdm(dataloader, unit='batch'):
            inputs = processor(images=images, return_tensors="pt")
            inputs = inputs.to(device)

            # Different model families expose image embeddings differently.
            if "clip-vit" in model_name:
                # CLIP: use the projected image features.
                image_features = model.get_image_features(**inputs)
            elif "vit-mae" in model_name:
                # ViT-MAE exposes no pooler output; mean-pool the encoder's last hidden state.
                image_features = model(**inputs).last_hidden_state.mean(dim=1)
            else:
                # Fall back to the model's pooled output.
                image_features = model(**inputs).pooler_output

            all_embeddings.append(image_features.cpu())
            all_cow_ids.extend(cow_ids)

    embeddings = torch.cat(all_embeddings, dim=0)
    output_file = f"{model_name.replace('/', '_')}_embeddings.pt"
    torch.save(embeddings, output_file)
    print(f"Embeddings saved to {output_file}")

if __name__ == '__main__':
    main()