Delete utils/data_generator.py
utils/data_generator.py  +0 -151
DELETED
@@ -1,151 +0,0 @@
-import copy
-import dlib
-import os
-import bz2
-import random
-from tqdm.notebook import tqdm
-import shutil
-from utils import image_to_array, load_image, download_data
-from utils.face_detection import crop_face, get_face_keypoints_detecting_function
-from mask_utils.mask_utils import mask_image
-
-
-class DataGenerator:
-    def __init__(self, configuration):
-        self.configuration = configuration
-        self.path_to_data = configuration.get('input_images_path')
-        self.path_to_patterns = configuration.get('path_to_patterns')
-        self.minimal_confidence = configuration.get('minimal_confidence')
-        self.hyp_ratio = configuration.get('hyp_ratio')
-        self.coordinates_range = configuration.get('coordinates_range')
-        self.test_image_count = configuration.get('test_image_count')
-        self.train_image_count = configuration.get('train_image_count')
-        self.train_data_path = configuration.get('train_data_path')
-        self.test_data_path = configuration.get('test_data_path')
-        self.predictor_path = configuration.get('landmarks_predictor_path')
-        self.check_predictor()
-
-        self.valid_image_extensions = ('png', 'jpg', 'jpeg')
-        self.face_keypoints_detecting_fun = get_face_keypoints_detecting_function(self.minimal_confidence)
-
-    def check_predictor(self):
-        """ Check if predictor exists. If not downloads it. """
-        if not os.path.exists(self.predictor_path):
-            print('Downloading missing predictor.')
-            url = self.configuration.get('landmarks_predictor_download_url')
-            download_data(url, self.predictor_path + '.bz2', 64040097)
-            print(f'Decompressing downloaded file into {self.predictor_path}')
-            with bz2.BZ2File(self.predictor_path + '.bz2') as fr, open(self.predictor_path, 'wb') as fw:
-                shutil.copyfileobj(fr, fw)
-
-    def get_face_landmarks(self, image):
-        """Compute 68 facial landmarks"""
-        landmarks = []
-        image_array = image_to_array(image)
-        detector = dlib.get_frontal_face_detector()
-        predictor = dlib.shape_predictor(self.predictor_path)
-        face_rectangles = detector(image_array)
-        if len(face_rectangles) < 1:
-            return None
-        dlib_shape = predictor(image_array, face_rectangles[0])
-        for i in range(0, dlib_shape.num_parts):
-            landmarks.append([dlib_shape.part(i).x, dlib_shape.part(i).y])
-        return landmarks
-
-    def get_files_faces(self):
-        """Get path of all images in dataset"""
-        image_files = []
-        for dirpath, dirs, files in os.walk(self.path_to_data):
-            for filename in files:
-                fname = os.path.join(dirpath, filename)
-                if fname.endswith(self.valid_image_extensions):
-                    image_files.append(fname)
-
-        return image_files
-
-    def generate_images(self, image_size=None, test_image_count=None, train_image_count=None):
-        """Generate test and train data (images with and without the mask)"""
-        if image_size is None:
-            image_size = self.configuration.get('image_size')
-        if test_image_count is None:
-            test_image_count = self.test_image_count
-        if train_image_count is None:
-            train_image_count = self.train_image_count
-
-        if not os.path.exists(self.train_data_path):
-            os.mkdir(self.train_data_path)
-            os.mkdir(os.path.join(self.train_data_path, 'inputs'))
-            os.mkdir(os.path.join(self.train_data_path, 'outputs'))
-
-        if not os.path.exists(self.test_data_path):
-            os.mkdir(self.test_data_path)
-            os.mkdir(os.path.join(self.test_data_path, 'inputs'))
-            os.mkdir(os.path.join(self.test_data_path, 'outputs'))
-
-        print('Generating testing data')
-        self.generate_data(test_image_count,
-                           image_size=image_size,
-                           save_to=self.test_data_path)
-        print('Generating training data')
-        self.generate_data(train_image_count,
-                           image_size=image_size,
-                           save_to=self.train_data_path)
-
-    def generate_data(self, number_of_images, image_size=None, save_to=None):
-        """ Add masks on `number_of_images` images
-        if save_to is valid path to folder images are saved there otherwise generated data are just returned in list
-        """
-        inputs = []
-        outputs = []
-
-        if image_size is None:
-            image_size = self.configuration.get('image_size')
-
-        for i, file in tqdm(enumerate(random.sample(self.get_files_faces(), number_of_images)), total=number_of_images):
-            # Load images
-            image = load_image(file)
-
-            # Detect keypoints and landmarks on face
-            face_landmarks = self.get_face_landmarks(image)
-            if face_landmarks is None:
-                continue
-            keypoints = self.face_keypoints_detecting_fun(image)
-
-            # Generate mask
-            image_with_mask = mask_image(copy.deepcopy(image), face_landmarks, self.configuration)
-
-            # Crop images
-            cropped_image = crop_face(image_with_mask, keypoints)
-            cropped_original = crop_face(image, keypoints)
-
-            # Resize all images to NN input size
-            res_image = cropped_image.resize(image_size)
-            res_original = cropped_original.resize(image_size)
-
-            # Save generated data to lists or to folder
-            if save_to is None:
-                inputs.append(res_image)
-                outputs.append(res_original)
-            else:
-                res_image.save(os.path.join(save_to, 'inputs', f"{i:06d}.png"))
-                res_original.save(os.path.join(save_to, 'outputs', f"{i:06d}.png"))
-
-        if save_to is None:
-            return inputs, outputs
-
-    def get_dataset_examples(self, n=10, test_dataset=False):
-        """
-        Returns `n` random images form dataset. If `test_dataset` parameter
-        is not provided or False it will return images from training part of dataset.
-        If `test_dataset` parameter is True it will return images from testing part of dataset.
-        """
-        if test_dataset:
-            data_path = self.test_data_path
-        else:
-            data_path = self.train_data_path
-
-        images = os.listdir(os.path.join(data_path, 'inputs'))
-        images = random.sample(images, n)
-        inputs = [os.path.join(data_path, 'inputs', img) for img in images]
-        outputs = [os.path.join(data_path, 'outputs', img) for img in images]
-        return inputs, outputs
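For reference, below is a minimal sketch of how the removed `DataGenerator` was typically driven. The configuration keys mirror the ones read in the deleted `__init__`; the concrete paths, thresholds, and the predictor download URL are illustrative assumptions, not values taken from this repository.

```python
# Hypothetical usage of the removed DataGenerator class (sketch only).
# All literal values below are assumptions for illustration.
from utils.data_generator import DataGenerator

configuration = {
    'input_images_path': 'data/faces',           # assumed source-image folder
    'path_to_patterns': 'data/mask_patterns',    # assumed mask-pattern folder
    'minimal_confidence': 0.8,                   # assumed face-detection threshold
    'hyp_ratio': 1 / 3,                          # assumed crop ratio
    'coordinates_range': (-0.1, 0.1),            # assumed mask-placement jitter
    'image_size': (256, 256),                    # assumed network input size
    'test_image_count': 100,
    'train_image_count': 1000,
    'train_data_path': 'data/train',
    'test_data_path': 'data/test',
    'landmarks_predictor_path': 'data/shape_predictor_68_face_landmarks.dat',
    # check_predictor() downloads and decompresses this archive if the .dat file is missing
    'landmarks_predictor_download_url': 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2',
}

generator = DataGenerator(configuration)

# Writes masked inputs and unmasked targets into <train|test>_data_path/{inputs,outputs}
generator.generate_images()

# Sample a few generated pairs back out of the test split
input_paths, output_paths = generator.get_dataset_examples(n=5, test_dataset=True)
```

With this file removed, the import above no longer resolves; the snippet only documents the interface that is being deleted.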