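# IP-Adapter / InstantID integration for Forge.
#
# Registers the CLIP-Vision (and InsightFace) preprocessors that build the
# conditioning dict for IP-Adapter models, and the control-model patcher that
# applies a loaded IP-Adapter state dict to the UNet before sampling.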
from pathlib import Path

from modules_forge.supported_preprocessor import PreprocessorClipVision, Preprocessor, PreprocessorParameter
from modules_forge.shared import add_supported_preprocessor, add_supported_control_model
from modules_forge.forge_util import numpy_to_pytorch
from modules_forge.supported_controlnet import ControlModelPatcher
from lib_ipadapter.IPAdapterPlus import IPAdapterApply, InsightFaceLoader
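
# Shared operator instances; only the bound methods are needed, so a single
# instance of each loader/apply class suffices.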
opIPAdapterApply = IPAdapterApply().apply_ipadapter
opInsightFaceLoader = InsightFaceLoader().load_insight_face
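
# CLIP-Vision preprocessor for IP-Adapter. Note that __call__ does not run the
# image encoder itself: it packs the loaded CLIP-Vision model and the input
# image into a conditioning dict that opIPAdapterApply consumes later, in
# IPAdapterPatcher.process_before_every_sampling.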
class PreprocessorClipVisionForIPAdapter(PreprocessorClipVision):
    def __init__(self, name, url, filename):
        super().__init__(name, url, filename)
        self.tags = ['IP-Adapter']
        self.model_filename_filters = ['IP-Adapter', 'IP_Adapter']
        self.sorting_priority = 20

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        cond = dict(
            clip_vision=self.load_clipvision(),
            image=numpy_to_pytorch(input_image),
            weight_type="original",
            noise=0.0,
            embeds=None,
            unfold_batch=False,
        )
        return cond
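

# Variant that additionally bundles an InsightFace face-analysis model into the
# cond dict (e.g. for FaceID-style IP-Adapter models). The InsightFace instance
# is cached so the model is only loaded once per preprocessor.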
class PreprocessorClipVisionWithInsightFaceForIPAdapter(PreprocessorClipVisionForIPAdapter):
    def __init__(self, name, url, filename):
        super().__init__(name, url, filename)
        self.cached_insightface = None

    def load_insightface(self):
        if self.cached_insightface is None:
            self.cached_insightface = opInsightFaceLoader()[0]
        return self.cached_insightface

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        cond = dict(
            clip_vision=self.load_clipvision(),
            insightface=self.load_insightface(),
            image=numpy_to_pytorch(input_image),
            weight_type="original",
            noise=0.0,
            embeds=None,
            unfold_batch=False,
        )
        return cond
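

# InstantID preprocessor: no CLIP-Vision, only InsightFace (loaded with the
# 'antelopev2' model pack), and the cond dict is flagged with instant_id=True
# so the apply step can branch accordingly.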
class PreprocessorInsightFaceForInstantID(Preprocessor):
    def __init__(self, name):
        super().__init__()
        self.name = name
        self.tags = ['Instant-ID']
        self.model_filename_filters = ['Instant-ID', 'Instant_ID']
        self.slider_resolution = PreprocessorParameter(visible=False)
        # Attribute name spelled this way ('corp') in Forge's Preprocessor base class.
        self.corp_image_with_a1111_mask_when_in_img2img_inpaint_tab = False
        self.show_control_mode = False
        self.sorting_priority = 10
        self.cached_insightface = None

    def load_insightface(self):
        if self.cached_insightface is None:
            self.cached_insightface = opInsightFaceLoader(name='antelopev2')[0]
        return self.cached_insightface

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        cond = dict(
            clip_vision=None,
            insightface=self.load_insightface(),
            image=numpy_to_pytorch(input_image),
            weight_type="original",
            noise=0.0,
            embeds=None,
            unfold_batch=False,
            instant_id=True,
        )
        return cond
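

# Register the preprocessors. The url/filename pairs tell Forge where to fetch
# each CLIP-Vision encoder (downloaded on demand) and the local filename to
# store it under.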
add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-H (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors',
    filename='CLIP-ViT-H-14.safetensors'
))

add_supported_preprocessor(PreprocessorClipVisionForIPAdapter(
    name='CLIP-ViT-bigG (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors',
    filename='CLIP-ViT-bigG.safetensors'
))

add_supported_preprocessor(PreprocessorClipVisionWithInsightFaceForIPAdapter(
    name='InsightFace+CLIP-H (IPAdapter)',
    url='https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors',
    filename='CLIP-ViT-H-14.safetensors'
))

add_supported_preprocessor(PreprocessorInsightFaceForInstantID(
    name='InsightFace (InstantID)',
))
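

# Control-model patcher for IP-Adapter checkpoints. try_build_from_state_dict
# is Forge's detection hook: it returns a patcher when the state dict looks
# like an IP-Adapter (flat "image_proj."/"ip_adapter." keys from a .safetensors
# file are regrouped into nested dicts first), and None otherwise so other
# patchers can be tried.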
class IPAdapterPatcher(ControlModelPatcher):
    @staticmethod
    def try_build_from_state_dict(state_dict, ckpt_path):
        model = state_dict

        if ckpt_path.lower().endswith(".safetensors"):
            st_model = {"image_proj": {}, "ip_adapter": {}}
            for key in model.keys():
                if key.startswith("image_proj."):
                    st_model["image_proj"][key.replace("image_proj.", "")] = model[key]
                elif key.startswith("ip_adapter."):
                    st_model["ip_adapter"][key.replace("ip_adapter.", "")] = model[key]
            model = st_model

        if "ip_adapter" not in model.keys() or len(model["ip_adapter"]) == 0:
            return None

        model_filename = Path(ckpt_path).name.lower()
        o = IPAdapterPatcher(model, model_filename)

        if 'v2' in model_filename:
            o.faceid_v2 = True
            o.weight_v2 = True

        return o

    def __init__(self, state_dict, model_filename):
        super().__init__()
        self.ip_adapter = state_dict
        self.model_filename = model_filename
        self.faceid_v2 = False
        self.weight_v2 = False
        return
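
    # Called by Forge before every sampling pass; 'cond' is the dict built by
    # one of the preprocessors above. When advanced (presumably per-block)
    # weighting is set, weight_type switches to "advanced" so opIPAdapterApply
    # interprets the weight accordingly.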
    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        unet = process.sd_model.forge_objects.unet

        if self.positive_advanced_weighting is None:
            weight = self.strength
            cond["weight_type"] = "original"
        else:
            weight = self.positive_advanced_weighting
            cond["weight_type"] = "advanced"

        unet = opIPAdapterApply(
            ipadapter=self.ip_adapter,
            model_filename=self.model_filename,
            model=unet,
            weight=weight,
            start_at=self.start_percent,
            end_at=self.end_percent,
            faceid_v2=self.faceid_v2,
            weight_v2=self.weight_v2,
            attn_mask=mask.squeeze(1) if mask is not None else None,
            **cond,
        )[0]

        process.sd_model.forge_objects.unet = unet
        return
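

# Register the patcher so Forge probes candidate ControlNet checkpoints with
# IPAdapterPatcher.try_build_from_state_dict at load time.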
add_supported_control_model(IPAdapterPatcher)