File size: 3,288 Bytes
ff65b0b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
#############################################################################
#
#   Source from:
#   https://github.com/leonelhs/baldgan
#   Forked from:
#   https://github.com/david-svitov/baldgan
#   Reimplemented by: Leonel Hernández
#
##############################################################################
import os

import PIL.Image
import cv2
import numpy as np
from retinaface import RetinaFace
from skimage import transform as trans
from huggingface_hub import hf_hub_download

from baldgan.model import buildModel

# Hugging Face Hub repo that hosts the pretrained generator weights.
BALDGAN_REPO_ID = "leonelhs/baldgan"

# Force CPU-only execution: hide all CUDA devices from the backend.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
gpu_id = -1  # -1 == CPU; kept for parity with the upstream project

# Size (height, width) of each aligned face crop fed to the network.
image_size = [256, 256]

# Canonical 5-point landmark template (left eye, right eye, nose tip,
# left mouth corner, right mouth corner) in 112x112 ArcFace coordinates.
src = np.array([
    [30.2946, 51.6963],
    [65.5318, 51.5014],
    [48.0252, 71.7366],
    [33.5493, 92.3655],
    [62.7299, 92.2041]], dtype=np.float32)

# Shift the template, then rescale 112 -> 200 so the aligned face sits
# with a margin inside the 256x256 frame.
# NOTE(review): x is shifted twice (8 + 15 = 23 total). Presumably
# intentional (8 is the standard ArcFace offset, 15 extra margin), but
# worth confirming against the upstream baldgan alignment code.
src[:, 0] += 8.0
src[:, 0] += 15.0
src[:, 1] += 30.0
src /= 112
src *= 200


def list2array(values):
    """Materialize any iterable (e.g. ``dict_values``) as a numpy array."""
    return np.asarray(tuple(values))


def align_face(img):
    """Detect faces in *img* and warp each onto the canonical template.

    Returns three parallel lists: the aligned face crops, white coverage
    masks warped with the same transform, and the full 3x3 similarity
    matrices (needed later to paste results back).
    """
    detections = RetinaFace.detect_faces(img)
    boxes = np.array([list2array(detections[key]['facial_area']) for key in detections])
    landmarks = np.array([list2array(detections[key]['landmarks'].values()) for key in detections])
    # An all-white frame; warping it yields the per-face coverage mask.
    blank = np.full(img.shape, 255, dtype=np.uint8)

    aligned = []
    coverage = []
    matrices = []

    if boxes.shape[0] == 0:
        return aligned, coverage, matrices

    out_size = (image_size[1], image_size[0])
    for dst in landmarks:
        # Least-squares similarity transform from detected landmarks to
        # the module-level template points in `src`.
        tform = trans.SimilarityTransform()
        tform.estimate(dst, src)
        affine = tform.params[0:2, :]
        aligned.append(cv2.warpAffine(img, affine, out_size, borderValue=0.0))
        coverage.append(cv2.warpAffine(blank, affine, out_size, borderValue=0.0))
        matrices.append(tform.params[0:3, :])

    return aligned, coverage, matrices


def put_face_back(img, faces, masks, result_matrix):
    """Paste each processed face crop back into *img*.

    Each crop is warped with the inverse of its recorded alignment
    matrix, and the matching warped mask selects which pixels of *img*
    get replaced. Returns the composited uint8 image.
    """
    out_size = (img.shape[1], img.shape[0])
    for idx, face in enumerate(faces):
        inverse = np.linalg.inv(result_matrix[idx])[0:2]
        restored = cv2.warpAffine(face, inverse, out_size, borderValue=0.0)
        # 255 -> 1, everything else (including interpolated edges) -> 0.
        region = cv2.warpAffine(masks[idx], inverse, out_size, borderValue=0.0) // 255
        # Clear the covered pixels, then add the warped face there.
        img = (img * (1 - region)).astype(np.uint8)
        img += restored * region
    return img


class BaldFace:
    """Renders detected faces bald using the pretrained BaldGAN generator.

    Weights are downloaded from the Hugging Face Hub on construction, so
    instantiation requires network access (or a warm hub cache).
    """

    def __init__(self):
        # Build the generator graph, then load the published checkpoint.
        self.model = buildModel()
        model_path = hf_hub_download(repo_id=BALDGAN_REPO_ID, filename='model_G_5_170.hdf5')
        self.model.load_weights(model_path)

    def make(self, image):
        """Return a PIL image with every detected face rendered bald.

        Parameters
        ----------
        image : numpy.ndarray or PIL.Image.Image
            Input photo. Arrays pass through unchanged; PIL images are
            converted. Channel order is assumed to match what
            RetinaFace expects — TODO confirm (RGB vs BGR).

        Returns
        -------
        PIL.Image.Image
            The composited result (original image when no face is found).
        """
        # Accept PIL images as well as ndarrays; no-op for ndarray input.
        image = np.asarray(image)
        faces, masks, matrix = align_face(image)
        result_faces = []

        for face in faces:
            # Network expects a batch axis and inputs scaled to [-1, 1].
            batch = np.expand_dims(face, axis=0) / 127.5 - 1.
            result = self.model.predict(batch)[0]
            # Map the generator output back to the uint8 pixel range.
            result = ((result + 1.) * 127.5).astype(np.uint8)
            result_faces.append(result)

        img_result = put_face_back(image, result_faces, masks, matrix)
        return PIL.Image.fromarray(img_result)