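"""
Compute image embeddings for an LMDB image dataset with a Hugging Face vision
model (e.g. ViT-MAE or CLIP) and save them to a .pt file.

Example invocation (the script filename is illustrative; the arguments mirror
the argparse defaults below):

    python compute_embeddings.py \
        --model_name openai/clip-vit-base-patch14-336 \
        --lmdb_path ../lmdb_all_crops_pmfeed_4_3_16 \
        --batch_size 128 --num_workers 8
"""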
import argparse
import re

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
from transformers import AutoProcessor, AutoModel

from data_loading import LMDBImageDataset

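# Use the file_system sharing strategy so that many DataLoader workers sharing
# tensors do not exhaust the process's open-file-descriptor limit.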
torch.multiprocessing.set_sharing_strategy('file_system')

def infer_image_size(model_name):
    """
    Infer image size from the model name.
    Looks for a trailing hyphen followed by digits (e.g., "-336").
    Defaults to 224 if not found.
    """
    match = re.search(r'-([0-9]+)$', model_name)
    if match:
        return int(match.group(1))
    else:
        return 224

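# Keep images and labels as plain Python lists; the default collate_fn cannot
# stack PIL images, and the Hugging Face processor batches them later anyway.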
def collate_fn(batch):
    images, labels = zip(*batch)
    return list(images), list(labels)

def main():
    parser = argparse.ArgumentParser(description="Compute embeddings for a Hugging Face model")
    parser.add_argument('--model_name', type=str, default="facebook/vit-mae-base",
                        help="Hugging Face model name, e.g., facebook/vit-mae-base or openai/clip-vit-base-patch14-336")
    parser.add_argument('--lmdb_path', type=str, default='../lmdb_all_crops_pmfeed_4_3_16', help="Path to the LMDB image dataset")
    parser.add_argument('--batch_size', type=int, default=128, help="Batch size for the DataLoader")
    parser.add_argument('--num_workers', type=int, default=8, help="Number of DataLoader worker processes")
    args = parser.parse_args()

    # Infer image size from the model name
    image_size = infer_image_size(args.model_name)
    print(f"Inferred image size: {image_size}")

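    # Only resize here; the Hugging Face processor converts the PIL images to
    # pixel_values tensors inside the embedding loop below.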
    transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
    ])

    # Create the dataset and dataloader.
    dataset = LMDBImageDataset(
        lmdb_path=args.lmdb_path, 
        transform=transform, 
        limit=None
    )
    dataloader = DataLoader(
        dataset, 
        batch_size=args.batch_size, 
        shuffle=False, 
        num_workers=args.num_workers,
        collate_fn=collate_fn
    )

    # Load the model and processor.
    model_name = args.model_name
    processor = AutoProcessor.from_pretrained(model_name, do_normalize=False)
    model = AutoModel.from_pretrained(model_name)
    print(f"Using model: {model_name}")
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    all_embeddings = []
    all_cow_ids = []

    # Loop through the dataset and compute embeddings.
    with torch.no_grad():
        for images, cow_ids in tqdm(dataloader, unit='batch'):
            inputs = processor(images=images, return_tensors="pt")
            inputs = inputs.to(device)
            # Pool each model's output into one embedding per image:
            #   CLIP models   -> projected features from get_image_features
            #   ViT-MAE       -> mean of the last hidden state over patch tokens
            #   anything else -> pooler_output
            if "clip-vit" in model_name:
                image_features = model.get_image_features(**inputs)
            elif "vit-mae" in model_name:
                image_features = model(**inputs).last_hidden_state.mean(dim=1)
            else:
                image_features = model(**inputs).pooler_output
            all_embeddings.append(image_features.cpu())
            all_cow_ids.extend(cow_ids)

    # Concatenate and save the embeddings; also save the matching cow IDs so
    # they are not silently discarded.
    embeddings = torch.cat(all_embeddings, dim=0)
    output_prefix = model_name.replace('/', '_')
    output_file = f"{output_prefix}_embeddings.pt"
    torch.save(embeddings, output_file)
    torch.save(all_cow_ids, f"{output_prefix}_cow_ids.pt")
    print(f"Saved {embeddings.shape[0]} embeddings to {output_file}")

if __name__ == '__main__':
    main()