# Transformers and its models
#import transformers

# For Image Processing
#from transformers import ViTImageProcessor

# For Model
#from transformers import ViTModel, ViTConfig, pipeline
import insightface
from insightface.app import FaceAnalysis

# For data augmentation (used only by the commented-out ViT pipeline below)
from torchvision import transforms

# For GPU
#from transformers import set_seed
#from torch.optim import AdamW
#from accelerate import Accelerator, notebook_launcher

# For Data Loaders (only needed by the commented-out ViT pipeline below)
#import datasets
#from torch.utils.data import Dataset, DataLoader

# For Display
#from tqdm.notebook import tqdm

# Other Generic Libraries
import torch
from PIL import Image
import cv2
import os
import streamlit as st
import gc
from glob import glob
import shutil
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
from io import BytesIO
import torch.nn.functional as F

# Set the device (GPU or CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialise Global Variables (MODEL_TRANSFORMER and BATCH_SIZE are retained for the commented-out ViT pipeline below)
MODEL_TRANSFORMER = 'google/vit-base-patch16-224'
BATCH_SIZE = 8

# Set Paths
data_path = 'employees'
model_path = 'vit_pytorch_GPU_1.pt'
webcam_path = 'captured_image.jpg'

IMAGE_SHAPE = 640

# Set Title
st.title("Employee Attendance System")

# Define Image Processor
#image_processor_prod = ViTImageProcessor.from_pretrained(MODEL_TRANSFORMER, attn_implementation="sdpa", torch_dtype=torch.float16)

# Define ML Model
#class FaceEmbeddingModel(torch.nn.Module):
#    def __init__(self, model_name, embedding_size):
#        super(FaceEmbeddingModel, self).__init__()
#        self.config = ViTConfig.from_pretrained(model_name, id2label=idx_to_label, label2id=label_to_idx, return_dict=True)
#        self.backbone = ViTModel.from_pretrained(model_name, config=self.config)  # Load ViT model
#        self.fc = torch.nn.Linear(self.backbone.config.hidden_size, embedding_size) # Convert to 512D feature vector
#
#    def forward(self, images):
#        x = self.backbone(images).last_hidden_state[:, 0]  # Extract embeddings
#        x = self.fc(x)  # Convert to 512D embedding
#        return torch.nn.functional.normalize(x)  # Normalize for cosine similarity
    
    
# Load the model
#model_pretrained = torch.load(model_path, map_location=device, weights_only=False)

# Define the ML model - Evaluation function
#def prod_function(transformer_model, prod_dl, webcam_dl):
#    # Initialize accelerator
#    accelerator = Accelerator()
#
#    # to INFO for the main process only.
#    #if accelerator.is_main_process:
#    #    datasets.utils.logging.set_verbosity_warning()
#    #    transformers.utils.logging.set_verbosity_info()
#    #else:
#    #    datasets.utils.logging.set_verbosity_error()
#    #    transformers.utils.logging.set_verbosity_error()
#
#    # The seed needs to be set before we instantiate the model, as it will determine the random head.
#    set_seed(42)
#
#    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the prepare method.
#    accelerated_model, accelerated_prod_dl, accelerated_webcam_dl = accelerator.prepare(transformer_model, prod_dl, webcam_dl)
#
#    # Evaluate at the end of the epoch
#    accelerated_model.eval()
#
#    # Find Embedding of the image to be evaluated
#    for batch in accelerated_webcam_dl:
#        with torch.no_grad():
#            #img_prod = acclerated_prod_data['pixel_values']
#            emb_prod = accelerated_model(batch['pixel_values'])
#
#    prod_preds = []
#
#    for batch in accelerated_prod_dl:
#        #img = batch['pixel_values']
#        with torch.no_grad():
#            emb = accelerated_model(batch['pixel_values'])
#        distance = F.pairwise_distance(emb, emb_prod)
#
#        prod_preds.append(distance)
#    return prod_preds
        
# Creation of Dataloader
#class CustomDatasetProd(Dataset):
#    def __init__(self, image_path, webcam):
#        self.image_path = image_path
#        self.webcam = webcam
#
#    def __len__(self):
#        return len(self.image_path)
#
#    def __getitem__(self, idx):
#        if self.webcam == False:
#            img = cv2.imread(self.image_path[idx])
#        else:
#            img = self.image_path
#        faces = app.get(img)
#        
#        if not faces:
#            raise Exception("No face detected")
#        
#        pixel_values = faces[0].embedding  # embedding is a 512-dimensional vector
#        item = {
#            'pixel_values': pixel_values.squeeze(0),
#        }
#        return item
        
# Creation of Dataset       
#class CreateDatasetProd():
#    def __init__(self, image_processor):
#        super().__init__()
#        self.image_processor = image_processor
#        # Define a transformation pipeline
#        self.transform_prod = transforms.v2.Compose([
#                                    transforms.v2.ToImage(),
#                                    transforms.v2.ToDtype(torch.uint8, scale=False)
#                                ])
#
#    def get_pixels(self, img_paths):
#        pixel_values = []
#        for path in img_paths:
#            # Read and process Images
#            img = Image.open(path)
#            img = self.transform_prod(img)
#
#            # Scaling the video to ML model's desired format
#            img = self.image_processor(img, return_tensors='pt') #, input_data_format='channels_first')
#
#            pixel_values.append(img['pixel_values'].squeeze(0))
#
#            # Force garbage collection
#            del img
#            gc.collect()
#        return pixel_values
#        
#    def get_pixel(self, img_path):
#        # Read and process Images
#        img = Image.open(img_path)
#        img = self.transform_prod(img)
#
#        # Scaling the video to ML model's desired format
#        img = self.image_processor(img, return_tensors='pt') #, input_data_format='channels_first')
#
#        pixel_values = img['pixel_values'] #.squeeze(0)
#
#        # Force garbage collection
#        del img
#        gc.collect()
#        
#        return pixel_values
#        
#    def create_dataset(self, image_paths, webcam=False):
#        if webcam == True:
#            pixel_values = self.get_pixel(image_paths)
#        else:
#            pixel_values = torch.stack(self.get_pixels(image_paths))
#        
#        return CustomDatasetProd(image_paths, webcam)
    
# Read images from directory
image_paths = []
image_file = glob(os.path.join(data_path, '*.jpg'))
#st.write(image_file)
image_paths.extend(image_file)
#st.write('input path size:', len(image_paths))
#st.write(image_paths)

# Initialize the app
app = FaceAnalysis(name="buffalo_l")  # buffalo_l includes ArcFace model
app.prepare(ctx_id=-1, det_size=(IMAGE_SHAPE, IMAGE_SHAPE))  # Use ctx_id=-1 if you want CPU, and ctx_id=0 for GPU
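
# A minimal sketch of what app.get() returns, assuming a readable face photo at
# 'employees/sample.jpg' (hypothetical path). Each detected face carries a
# bounding box and a 512-D ArcFace embedding:
#
#   img = cv2.imread('employees/sample.jpg')
#   faces = app.get(img, max_num=1)
#   if faces:
#       emb = faces[0].embedding   # numpy array of shape (512,)
#       bbox = faces[0].bbox       # face bounding box [x1, y1, x2, y2]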

# Create DataLoader for Employees image
#dataset_prod_obj = CreateDatasetProd(image_processor_prod)
#prod_ds = dataset_prod_obj.create_dataset(image_paths, webcam=False)
#prod_dl = DataLoader(prod_ds, batch_size=BATCH_SIZE)

## Testing the dataloader
#prod_inputs = next(iter(prod_dl))
#st.write(prod_inputs['pixel_values'].shape) 


# Evaluation function: embed the webcam capture and score it against every employee photo
def prod_function(app, prod_path, webcam_path):
    # webcam_path can be a file path or a file-like object (st.camera_input returns the latter)
    webcam_img = Image.open(webcam_path)
    np_webcam = np.array(webcam_img)  # Convert to NumPy array
    cv2_webcam = cv2.cvtColor(np_webcam, cv2.COLOR_RGB2BGR)  # Convert RGB (PIL) to BGR (OpenCV)

    # Detect the face in the capture and take its 512-D ArcFace embedding
    webcam_faces = app.get(cv2_webcam, max_num=1)
    if not webcam_faces:
        st.error("No face detected in the captured image. Please try again.")
        return None
    webcam_emb = torch.from_numpy(webcam_faces[0].embedding)

    # Cosine similarity between the capture and each stored employee photo
    similarity_score = []
    for path in prod_path:
        img = cv2.imread(path)
        faces = app.get(img, max_num=1)
        if not faces:
            # Keep indices aligned with image_paths even if a photo has no detectable face
            similarity_score.append(torch.tensor(-1.0))
            continue
        face_embedding = torch.from_numpy(faces[0].embedding)
        similarity_score.append(F.cosine_similarity(face_embedding, webcam_emb, dim=0))

    return torch.stack(similarity_score)
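
# Note: prod_function re-embeds every employee photo on each capture. A minimal
# sketch of a one-time cache, assuming the employee gallery does not change
# while the app is running (st.cache_resource is Streamlit's primitive for
# caching expensive, shareable objects):
#
#   @st.cache_resource
#   def load_employee_embeddings(paths):
#       embs = []
#       for p in paths:
#           faces = app.get(cv2.imread(p), max_num=1)
#           embs.append(torch.from_numpy(faces[0].embedding))
#       return torch.stack(embs)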

about_tab, app_tab = st.tabs(["About the app", "Face Recognition"])
# About the app Tab
with about_tab:
    st.markdown(
        """
        # 👁️‍🗨️ AI-Powered Face Recognition Attendance System
        Effortless, Secure, and Accurate Attendance with Vision Transformer Technology
        
        An intelligent, facial recognition-based attendance solution that redefines how organizations manage employee presence. By leveraging cutting-edge computer vision and AI, the app automates attendance tracking with speed, precision, and reliability—no timecards, no fingerprint scans, just a glance.
        
        ## 🎯 Project Objective
        To eliminate outdated, manual attendance methods with a seamless, contactless facial recognition system. Our solution not only improves the accuracy of attendance logs but also boosts workplace security and streamlines HR operations—all in real time.
        Employees are simply scanned as they enter or leave the premises. Their attendance is automatically logged, reducing the risk of buddy punching, manual entry errors, and delays in record-keeping.
        
        ## 🧠 How It Works: The AI in Action
        At the core of this app is Google’s Vision Transformer (ViT) architecture, trained on the Labeled Faces in the Wild (LFW) dataset for robust, real-world face recognition.
        
        - **Face Detection & Feature Extraction**
        The model scans an employee’s face and extracts a high-dimensional representation of their unique features.
        
        - **Identity Matching with Confidence Scoring**
        The scanned features are compared to stored profiles. If the confidence score crosses a threshold, the model confirms the match and automatically marks attendance (a minimal sketch follows this list).
        
        - **Real-Time Logging**
        The app logs entry and exit times in real-time, providing live dashboards and attendance reports for HR and management.
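
        A minimal sketch of the matching step, with random vectors standing in for real 512-D embeddings (0.6 is the similarity threshold this app uses):

        ```python
        import torch
        import torch.nn.functional as F

        live_embedding = torch.randn(512)     # stand-in for the scanned face's features
        stored_embedding = torch.randn(512)   # stand-in for an enrolled profile

        score = F.cosine_similarity(live_embedding, stored_embedding, dim=0)
        if score >= 0.6:   # confidence threshold; tune per deployment
            print("Match confirmed: mark attendance")
        ```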
        
        ## 🏗️ About the Architecture: Vision Transformer (ViT)
        The Vision Transformer (ViT) brings the power of transformer models—originally created for language—to the world of images. Here's how it works:
        
        - An input image is split into fixed-size non-overlapping patches.
        - Each patch is flattened and embedded into a higher-dimensional space.
        - These embeddings are fed into a transformer encoder, which learns complex spatial and contextual relationships across the entire image using multi-head self-attention.
        - ViT’s ability to capture global dependencies enables it to outperform traditional CNNs when trained on sufficient data.
        
        This makes it ideal for high-accuracy face recognition in dynamic, real-world environments.
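
        As a concrete sketch of the patching arithmetic, assuming the ViT-Base/16 configuration referenced in this app (224×224 inputs, 16×16 patches, 768-D hidden size):

        ```python
        image_size, patch_size, hidden_dim = 224, 16, 768
        num_patches = (image_size // patch_size) ** 2   # 14 * 14 = 196 patches
        # The encoder therefore sees 196 patch embeddings (plus 1 [CLS] token),
        # each of dimension 768.
        ```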
        
        ## 📚 About the Dataset: Labeled Faces in the Wild (LFW)
        To train the model, we used the renowned Labeled Faces in the Wild (LFW) dataset: more than 13,000 facial images of 5,749 individuals, captured in diverse lighting, angles, and backgrounds. Sourced from real-world photographs of public figures, it is a standard benchmark for face verification and recognition tasks. This diversity ensures our model is resilient to variations in appearance, making it highly reliable in real-world workplace scenarios.
        
        ## ✅ Key Features
        - Fast, contactless attendance logging
        - High-security identity verification
        - Real-time data and analytics
        - Powered by state-of-the-art Vision Transformer architecture
        - Eliminates manual records, reduces fraud, enhances efficiency
        
        ## 👥 Use Cases
        - Corporate Offices: Accurate time tracking and security for large workforces
        - Factories & Warehouses: Contactless attendance in high-throughput environments
        - Educational Institutions: Seamless student and staff attendance
        - Healthcare & Public Services: Ensures hygienic, automated check-ins
        
        ## 🚀 Future Scope
        Looking ahead, we aim to integrate multi-face detection for group scanning, mask-aware recognition, and cross-location synchronization for distributed teams—all while preserving data privacy and security.
        """)

# Face Recognition Tab
with app_tab:
    # Read image from Camera
    enable = st.checkbox("Enable camera")
    picture = st.camera_input("Take a picture", disabled=not enable)
    if picture is not None:
        #img = Image.open(picture)
        #picture.save(webcam_path, "JPEG")
        #st.write('Image saved as:',webcam_path)
    
        ## Create DataLoader for Webcam Image
        #webcam_ds = dataset_prod_obj.create_dataset(picture, webcam=True)
        #webcam_dl = DataLoader(webcam_ds, batch_size=BATCH_SIZE)
    
        ## Testing the dataloader
        #prod_inputs = next(iter(webcam_dl))
        #st.write(prod_inputs['pixel_values'].shape)
    
        with st.spinner("Wait for it...", show_time=True):
            # Run the predictions
            prediction = prod_function(app, image_paths, picture)

        if prediction is not None:
            match_idx = torch.argmax(prediction).item()
            st.write(prediction)
            st.write(image_paths)

            # Display the results
            if prediction[match_idx] >= 0.6:
                st.write('Welcome: ', os.path.splitext(os.path.basename(image_paths[match_idx]))[0])
            else:
                st.write("Match not found")