Spaces:
Running
on
Zero
Running
on
Zero
Commit
·
fdb500d
1
Parent(s):
2c7e369
Change det_size from 512 to 256
Browse files
adaface/face_id_to_ada_prompt.py
CHANGED
@@ -606,7 +606,7 @@ class Arc2Face_ID2AdaPrompt(FaceID2AdaPrompt):
|
|
606 |
# Note there are two "models" in the path.
|
607 |
self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
|
608 |
providers=['CPUExecutionProvider'])
|
609 |
-
self.face_app.prepare(ctx_id=0, det_size=(512, 512))
|
610 |
print(f'Arc2Face Face encoder loaded on CPU.')
|
611 |
|
612 |
self.text_to_image_prompt_encoder = CLIPTextModelWrapper.from_pretrained(
|
@@ -654,14 +654,14 @@ class Arc2Face_ID2AdaPrompt(FaceID2AdaPrompt):
|
|
654 |
if str(device) == 'cpu':
|
655 |
self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
|
656 |
providers=['CPUExecutionProvider'])
|
657 |
-
self.face_app.prepare(ctx_id=0, det_size=(512, 512))
|
658 |
else:
|
659 |
device_id = device.index
|
660 |
self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
|
661 |
providers=['CUDAExecutionProvider'],
|
662 |
provider_options=[{"device_id": device_id,
|
663 |
'gpu_mem_limit': 2 * 1024 * 1024 * 1024}])
|
664 |
-
self.face_app.prepare(ctx_id=device_id, det_size=(512, 512))
|
665 |
|
666 |
self.device = device
|
667 |
print(f'Arc2Face Face encoder reloaded on {device}.')
|
@@ -800,14 +800,14 @@ class ConsistentID_ID2AdaPrompt(FaceID2AdaPrompt):
|
|
800 |
if str(device) == 'cpu':
|
801 |
self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
|
802 |
providers=['CPUExecutionProvider'])
|
803 |
-
self.face_app.prepare(ctx_id=0, det_size=(512, 512))
|
804 |
else:
|
805 |
device_id = device.index
|
806 |
self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
|
807 |
providers=['CUDAExecutionProvider'],
|
808 |
provider_options=[{"device_id": device_id,
|
809 |
'gpu_mem_limit': 2 * 1024 * 1024 * 1024}])
|
810 |
-
self.face_app.prepare(ctx_id=device_id, det_size=(512, 512))
|
811 |
|
812 |
self.device = device
|
813 |
self.pipe.face_app = self.face_app
|
|
|
606 |
# Note there are two "models" in the path.
|
607 |
self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
|
608 |
providers=['CPUExecutionProvider'])
|
609 |
+
self.face_app.prepare(ctx_id=0, det_size=(256, 256))
|
610 |
print(f'Arc2Face Face encoder loaded on CPU.')
|
611 |
|
612 |
self.text_to_image_prompt_encoder = CLIPTextModelWrapper.from_pretrained(
|
|
|
654 |
if str(device) == 'cpu':
|
655 |
self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
|
656 |
providers=['CPUExecutionProvider'])
|
657 |
+
self.face_app.prepare(ctx_id=0, det_size=(256, 256))
|
658 |
else:
|
659 |
device_id = device.index
|
660 |
self.face_app = FaceAnalysis(name='antelopev2', root='models/insightface',
|
661 |
providers=['CUDAExecutionProvider'],
|
662 |
provider_options=[{"device_id": device_id,
|
663 |
'gpu_mem_limit': 2 * 1024 * 1024 * 1024}])
|
664 |
+
self.face_app.prepare(ctx_id=device_id, det_size=(256, 256))
|
665 |
|
666 |
self.device = device
|
667 |
print(f'Arc2Face Face encoder reloaded on {device}.')
|
|
|
800 |
if str(device) == 'cpu':
|
801 |
self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
|
802 |
providers=['CPUExecutionProvider'])
|
803 |
+
self.face_app.prepare(ctx_id=0, det_size=(256, 256))
|
804 |
else:
|
805 |
device_id = device.index
|
806 |
self.face_app = FaceAnalysis(name='buffalo_l', root='models/insightface',
|
807 |
providers=['CUDAExecutionProvider'],
|
808 |
provider_options=[{"device_id": device_id,
|
809 |
'gpu_mem_limit': 2 * 1024 * 1024 * 1024}])
|
810 |
+
self.face_app.prepare(ctx_id=device_id, det_size=(256, 256))
|
811 |
|
812 |
self.device = device
|
813 |
self.pipe.face_app = self.face_app
|