Upload folder using huggingface_hub
- main/README.md +1 -1
- main/dps_pipeline.py +3 -3
- main/fresco_v2v.py +4 -4
- main/hd_painter.py +1 -1
main/README.md CHANGED
@@ -5381,7 +5381,7 @@ pipe = DiffusionPipeline.from_pretrained(
 # Here we need use pipeline internal unet model
 pipe.unet = pipe.unet_model.from_pretrained(model_id, subfolder="unet", variant="fp16", use_safetensors=True)
 
-# Load
+# Load additional layers to the model
 pipe.unet.load_additional_layers(weight_path="proc_data/faithdiff/FaithDiff.bin", dtype=dtype)
 
 # Enable vae tiling
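For context, here is a hedged sketch of the fuller usage this README snippet sits in. Only the three middle calls mirror the diff; the model_id, the custom_pipeline name, and the final pipe.vae.enable_tiling() call (implied by the closing comment) are assumptions.

# Minimal sketch of the surrounding FaithDiff usage; model_id and the
# custom_pipeline name are assumptions, the three middle calls mirror the diff.
import torch
from diffusers import DiffusionPipeline

dtype = torch.float16
model_id = "stabilityai/stable-diffusion-xl-base-1.0"  # assumed base checkpoint

pipe = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="pipeline_faithdiff_stable_diffusion_xl",  # assumed name
    torch_dtype=dtype,
)

# Here we need use pipeline internal unet model
pipe.unet = pipe.unet_model.from_pretrained(model_id, subfolder="unet", variant="fp16", use_safetensors=True)

# Load additional layers to the model
pipe.unet.load_additional_layers(weight_path="proc_data/faithdiff/FaithDiff.bin", dtype=dtype)

# Enable vae tiling (standard diffusers call implied by the comment above)
pipe.vae.enable_tiling()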
main/dps_pipeline.py CHANGED
@@ -312,9 +312,9 @@ if __name__ == "__main__":
 # These are the coordinates of the output image
 out_coordinates = np.arange(1, out_length + 1)
 
-# since both scale-factor and output size can be provided
-# the output coordinates. the deviation is because out_length doesn't
-# to keep the center we need to subtract half of this
+# since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
+# the output coordinates. the deviation is because out_length doesn't necessary equal in_length*scale.
+# to keep the center we need to subtract half of this deviation so that we get equal margins for both sides and center is preserved.
 shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2
 
 # These are the matching positions of the output-coordinates on the input image coordinates.
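A quick numeric sketch of the comment restored above (all values assumed): when out_length is given directly rather than derived as in_length * scale, the two disagree, and subtracting half of the deviation leaves equal margins on both sides so the center is preserved.

# Worked example of the center-preserving shift described above (values assumed).
import numpy as np

in_length, scale = 10, 2
out_length = 21  # provided directly, so out_length != in_length * scale

out_coordinates = np.arange(1, out_length + 1)

# deviation between the requested output size and the scaled input size
deviation = out_length - in_length * scale  # 1

# subtract half of the deviation so both margins are equal and the center is preserved
shifted_out_coordinates = out_coordinates - deviation / 2
print(shifted_out_coordinates[0], shifted_out_coordinates[-1])  # 0.5 20.5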
main/fresco_v2v.py CHANGED
@@ -351,7 +351,7 @@ def my_forward(
 cross_attention_kwargs (`dict`, *optional*):
     A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
 added_cond_kwargs: (`dict`, *optional*):
-    A kwargs dictionary
+    A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
     are passed along to the UNet blocks.
 
 Returns:
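For reference, the restored docstring line describes the same convention SDXL-style UNets use, where added_cond_kwargs carries extra embeddings into the forward pass. A sketch with assumed shapes; the commented call is illustrative, not taken from fresco_v2v.py.

# Sketch of how added_cond_kwargs is typically passed to an SDXL-style UNet
# forward; the tensor shapes here are assumptions for illustration.
import torch

added_cond_kwargs = {
    "text_embeds": torch.randn(2, 1280),  # pooled text embeddings
    "time_ids": torch.randn(2, 6),        # original size / crop / target size ids
}

# noise_pred = unet(
#     latents, t, encoder_hidden_states=prompt_embeds,
#     added_cond_kwargs=added_cond_kwargs,
# ).sample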
@@ -864,9 +864,9 @@ def get_flow_and_interframe_paras(flow_model, imgs):
 class AttentionControl:
     """
     Control FRESCO-based attention
-    * enable/
-    * enable/
-    * enable/
+    * enable/disable spatial-guided attention
+    * enable/disable temporal-guided attention
+    * enable/disable cross-frame attention
     * collect intermediate attention feature (for spatial-guided attention)
     """
 
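A minimal sketch of the toggle-and-collect pattern the restored docstring describes; all attribute and method names below are hypothetical, not FRESCO's actual API.

# Hypothetical sketch of the enable/disable pattern from the docstring above;
# attribute and method names are assumptions, not FRESCO's actual API.
class AttentionControlSketch:
    def __init__(self):
        self.use_spatial_guidance = False   # spatial-guided attention
        self.use_temporal_guidance = False  # temporal-guided attention
        self.use_cross_frame = False        # cross-frame attention
        self.stored_attn = []               # collected intermediate attention features

    def enable_all(self):
        self.use_spatial_guidance = True
        self.use_temporal_guidance = True
        self.use_cross_frame = True

    def disable_all(self):
        self.use_spatial_guidance = False
        self.use_temporal_guidance = False
        self.use_cross_frame = False

    def store(self, attn_feature):
        # collect an intermediate attention feature for spatial-guided attention
        self.stored_attn.append(attn_feature)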
main/hd_painter.py CHANGED
@@ -34,7 +34,7 @@ class RASGAttnProcessor:
     temb: Optional[torch.Tensor] = None,
     scale: float = 1.0,
 ) -> torch.Tensor:
-    # Same as the default AttnProcessor up
+    # Same as the default AttnProcessor up until the part where similarity matrix gets saved
     downscale_factor = self.mask_resoltuion // hidden_states.shape[1]
     residual = hidden_states
 
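A quick numeric sketch of the line following the restored comment (values assumed): mask_resoltuion is the flattened mask pixel count (the attribute name carries that spelling in the source) and hidden_states.shape[1] is the attention sequence length.

# Numeric sketch of the downscale_factor computation (values assumed):
# hidden_states has shape (batch, sequence_length, channels), and
# mask_resoltuion is the flattened mask size.
mask_resoltuion = 64 * 64   # 4096 flattened mask pixels
sequence_length = 32 * 32   # 1024 tokens at this attention layer

downscale_factor = mask_resoltuion // sequence_length
print(downscale_factor)  # 4, i.e. the mask covers 4x more pixels than this layer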