from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import BaseOutput, logging from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_3d_blocks import UNetMidBlockSpatioTemporal, get_down_block, get_up_block logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetSpatioTemporalConditionOutput(BaseOutput): """ The output of [`UNetSpatioTemporalConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNetSpatioTemporalConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional Spatio-Temporal UNet model that takes a noisy video frames, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 8): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. addition_time_embed_dim: (`int`, defaults to 256): Dimension to to encode the additional time ids. projection_class_embeddings_input_dim (`int`, defaults to 768): The dimension of the projection of encoded `added_time_ids`. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_3d_blocks.CrossAttnDownBlockSpatioTemporal`], [`~models.unet_3d_blocks.CrossAttnUpBlockSpatioTemporal`], [`~models.unet_3d_blocks.UNetMidBlockSpatioTemporal`]. num_attention_heads (`int`, `Tuple[int]`, defaults to `(5, 10, 10, 20)`): The number of attention heads. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 8, out_channels: int = 4, down_block_types: Tuple[str] = ( "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", ), up_block_types: Tuple[str] = ( "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", ), block_out_channels: Tuple[int] = (320, 640, 1280, 1280), addition_time_embed_dim: int = 256, projection_class_embeddings_input_dim: int = 768, layers_per_block: Union[int, Tuple[int]] = 2, cross_attention_dim: Union[int, Tuple[int]] = 1024, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, num_attention_heads: Union[int, Tuple[int]] = (5, 10, 10, 20), num_frames: int = 25, ): super().__init__() self.sample_size = sample_size # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=3, padding=1, ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, downscale_freq_shift=0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.add_time_proj = Timesteps(addition_time_embed_dim, True, downscale_freq_shift=0) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-5, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], resnet_act_fn="silu", ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlockSpatioTemporal( block_out_channels[-1], temb_channels=blocks_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block[-1], cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-5, resolution_idx=i, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], resnet_act_fn="silu", ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-5) 
self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=3, padding=1, ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors( name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor], ): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Sets the attention processor to use [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually over each tensor of dim=`dim`. dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). 
""" if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, added_time_ids: torch.Tensor, return_dict: bool = True, ) -> Union[UNetSpatioTemporalConditionOutput, Tuple]: r""" The [`UNetSpatioTemporalConditionModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, num_frames, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, cross_attention_dim)`. added_time_ids: (`torch.FloatTensor`): The additional time ids with shape `(batch, num_additional_ids)`. These are encoded with sinusoidal embeddings and added to the time embeddings. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] instead of a plain tuple. Returns: [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML batch_size, num_frames = sample.shape[:2] timesteps = timesteps.expand(batch_size) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb) time_embeds = self.add_time_proj(added_time_ids.flatten()) time_embeds = time_embeds.reshape((batch_size, -1)) time_embeds = time_embeds.to(emb.dtype) aug_emb = self.add_embedding(time_embeds) emb = emb + aug_emb # Flatten the batch and frames dimensions # sample: [batch, frames, channels, height, width] -> [batch * frames, channels, height, width] sample = sample.flatten(0, 1) # Repeat the embeddings num_video_frames times # emb: [batch, channels] -> [batch * frames, channels] emb = emb.repeat_interleave(num_frames, dim=0) # encoder_hidden_states: [batch, 1, channels] -> [batch * frames, 1, channels] encoder_hidden_states = encoder_hidden_states.repeat_interleave(num_frames, dim=0) # 2. 
pre-process sample = self.conv_in(sample) image_only_indicator = torch.zeros(batch_size, num_frames, dtype=sample.dtype, device=sample.device) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) else: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, image_only_indicator=image_only_indicator, ) down_block_res_samples += res_samples # 4. mid sample = self.mid_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) # 5. up for i, upsample_block in enumerate(self.up_blocks): res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, image_only_indicator=image_only_indicator, ) # 6. post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) # 7. Reshape back to original shape sample = sample.reshape(batch_size, num_frames, *sample.shape[1:]) if not return_dict: return (sample,) return UNetSpatioTemporalConditionOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_spatio_temporal_condition.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_spatio_temporal_condition.py", "repo_id": "diffusers", "token_count": 9881 }
115
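A minimal usage sketch for the spatio-temporal UNet defined above. The checkpoint id, latent resolution, and conditioning values are assumptions based on the public Stable Video Diffusion release rather than anything stated in this file; the forward signature and tensor layouts follow the code above.

```py
import torch
from diffusers import UNetSpatioTemporalConditionModel

# Assumed checkpoint: the UNet shipped with Stable Video Diffusion (weights are several GB).
unet = UNetSpatioTemporalConditionModel.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", subfolder="unet", torch_dtype=torch.float16
).to("cuda")

batch, frames = 1, 25
# Noisy latents: (batch, num_frames, in_channels=8, height, width); 72x128 latents ~ 576x1024 pixels.
sample = torch.randn(batch, frames, 8, 72, 128, dtype=torch.float16, device="cuda")
# CLIP image embedding used as encoder_hidden_states: (batch, 1, cross_attention_dim=1024).
encoder_hidden_states = torch.randn(batch, 1, 1024, dtype=torch.float16, device="cuda")
# Three added time ids (fps, motion_bucket_id, noise_aug_strength); 3 * 256 = 768 matches
# the default projection_class_embeddings_input_dim.
added_time_ids = torch.tensor([[6.0, 127.0, 0.02]], dtype=torch.float16, device="cuda")

with torch.no_grad():
    noise_pred = unet(
        sample,
        timestep=torch.tensor([10], device="cuda"),
        encoder_hidden_states=encoder_hidden_states,
        added_time_ids=added_time_ids,
    ).sample

print(noise_pred.shape)  # torch.Size([1, 25, 4, 72, 128])
```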
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline}) else: _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) else: from .pipeline_audioldm import AudioLDMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/audioldm/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/audioldm/__init__.py", "repo_id": "diffusers", "token_count": 581 }
116
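The module above only wires up lazy imports for the AudioLDM pipeline. A hedged usage sketch of the class it exposes; the checkpoint id and prompt are illustrative, and the dtype/device choices assume a CUDA GPU plus transformers>=4.27.0.

```py
import torch
from diffusers import AudioLDMPipeline

# Resolving the attribute triggers the lazy import declared above.
pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-s-full-v2", torch_dtype=torch.float16).to("cuda")

audio = pipe(
    "techno music with a strong, upbeat tempo and high melodic riffs",
    num_inference_steps=10,
    audio_length_in_s=5.0,
).audios[0]
print(audio.shape)  # 1-D waveform (NumPy) at 16 kHz
```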
# Copyright 2023 Salesforce.com, inc. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Union import PIL.Image import torch from transformers import CLIPTokenizer from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel from ...schedulers import PNDMScheduler from ...utils import ( logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..blip_diffusion.blip_image_processing import BlipImageProcessor from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers.pipelines import BlipDiffusionControlNetPipeline >>> from diffusers.utils import load_image >>> from controlnet_aux import CannyDetector >>> import torch >>> blip_diffusion_pipe = BlipDiffusionControlNetPipeline.from_pretrained( ... "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16 ... ).to("cuda") >>> style_subject = "flower" >>> tgt_subject = "teapot" >>> text_prompt = "on a marble table" >>> cldm_cond_image = load_image( ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg" ... ).resize((512, 512)) >>> canny = CannyDetector() >>> cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type="pil") >>> style_image = load_image( ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg" ... ) >>> guidance_scale = 7.5 >>> num_inference_steps = 50 >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" >>> output = blip_diffusion_pipe( ... text_prompt, ... style_image, ... cldm_cond_image, ... style_subject, ... tgt_subject, ... guidance_scale=guidance_scale, ... num_inference_steps=num_inference_steps, ... neg_prompt=negative_prompt, ... height=512, ... width=512, ... ).images >>> output[0].save("image.png") ``` """ class BlipDiffusionControlNetPipeline(DiffusionPipeline): """ Pipeline for Canny Edge based Controlled subject-driven generation using Blip Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer ([`CLIPTokenizer`]): Tokenizer for the text encoder text_encoder ([`ContextCLIPTextModel`]): Text encoder to encode the text prompt vae ([`AutoencoderKL`]): VAE model to map the latents to the image unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. 
scheduler ([`PNDMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. qformer ([`Blip2QFormerModel`]): QFormer model to get multi-modal embeddings from the text and image. controlnet ([`ControlNetModel`]): ControlNet model to get the conditioning image embedding. image_processor ([`BlipImageProcessor`]): Image Processor to preprocess and postprocess the image. ctx_begin_pos (int, `optional`, defaults to 2): Position of the context token in the text encoder. """ model_cpu_offload_seq = "qformer->text_encoder->unet->vae" def __init__( self, tokenizer: CLIPTokenizer, text_encoder: ContextCLIPTextModel, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: PNDMScheduler, qformer: Blip2QFormerModel, controlnet: ControlNetModel, image_processor: BlipImageProcessor, ctx_begin_pos: int = 2, mean: List[float] = None, std: List[float] = None, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, controlnet=controlnet, image_processor=image_processor, ) self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) def get_query_embeddings(self, input_image, src_subject): return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) # from the original Blip Diffusion code, speciefies the target subject and augments the prompt by repeating it def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): rv = [] for prompt, tgt_subject in zip(prompts, tgt_subjects): prompt = f"a {tgt_subject} {prompt.strip()}" # a trick to amplify the prompt rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) return rv # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def encode_prompt(self, query_embeds, prompt, device=None): device = device or self._execution_device # embeddings for prompt, with query_embeds as context max_len = self.text_encoder.text_model.config.max_position_embeddings max_len -= self.qformer.config.num_query_tokens tokenized_prompt = self.tokenizer( prompt, padding="max_length", truncation=True, max_length=max_len, return_tensors="pt", ).to(device) batch_size = query_embeds.shape[0] ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size text_embeddings = self.text_encoder( input_ids=tokenized_prompt.input_ids, ctx_embeddings=query_embeds, ctx_begin_pos=ctx_begin_pos, )[0] return text_embeddings # Adapted from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, ): image = self.image_processor.preprocess( image, size={"width": width, "height": height}, do_rescale=True, do_center_crop=False, do_normalize=False, return_tensors="pt", )["pixel_values"].to(device) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance: image = torch.cat([image] * 2) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: List[str], reference_image: PIL.Image.Image, condtioning_image: PIL.Image.Image, source_subject_category: List[str], target_subject_category: List[str], latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 7.5, height: int = 512, width: int = 512, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, neg_prompt: Optional[str] = "", prompt_strength: float = 1.0, prompt_reps: int = 20, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`List[str]`): The prompt or prompts to guide the image generation. reference_image (`PIL.Image.Image`): The reference image to condition the generation on. condtioning_image (`PIL.Image.Image`): The conditioning canny edge image to condition the generation on. source_subject_category (`List[str]`): The source subject category. target_subject_category (`List[str]`): The target subject category. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. height (`int`, *optional*, defaults to 512): The height of the generated image. width (`int`, *optional*, defaults to 512): The width of the generated image. seed (`int`, *optional*, defaults to 42): The seed to use for random generation. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. neg_prompt (`str`, *optional*, defaults to ""): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_strength (`float`, *optional*, defaults to 1.0): The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps to amplify the prompt. prompt_reps (`int`, *optional*, defaults to 20): The number of times the prompt is repeated along with prompt_strength to amplify the prompt. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ device = self._execution_device reference_image = self.image_processor.preprocess( reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" )["pixel_values"] reference_image = reference_image.to(device) if isinstance(prompt, str): prompt = [prompt] if isinstance(source_subject_category, str): source_subject_category = [source_subject_category] if isinstance(target_subject_category, str): target_subject_category = [target_subject_category] batch_size = len(prompt) prompt = self._build_prompt( prompts=prompt, tgt_subjects=target_subject_category, prompt_strength=prompt_strength, prompt_reps=prompt_reps, ) query_embeds = self.get_query_embeddings(reference_image, source_subject_category) text_embeddings = self.encode_prompt(query_embeds, prompt, device) # 3. unconditional embedding do_classifier_free_guidance = guidance_scale > 1.0 if do_classifier_free_guidance: max_length = self.text_encoder.text_model.config.max_position_embeddings uncond_input = self.tokenizer( [neg_prompt] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt", ) uncond_embeddings = self.text_encoder( input_ids=uncond_input.input_ids.to(device), ctx_embeddings=None, )[0] # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) latents = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=height // scale_down_factor, width=width // scale_down_factor, generator=generator, latents=latents, dtype=self.unet.dtype, device=device, ) # set timesteps extra_set_kwargs = {} self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) cond_image = self.prepare_control_image( image=condtioning_image, width=width, height=height, batch_size=batch_size, num_images_per_prompt=1, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): # expand the latents if we are doing classifier free guidance do_classifier_free_guidance = guidance_scale > 1.0 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=text_embeddings, controlnet_cond=cond_image, return_dict=False, ) noise_pred = self.unet( latent_model_input, timestep=t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, )["sample"] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step( noise_pred, t, latents, )["prev_sample"] image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py", "repo_id": "diffusers", "token_count": 7634 }
117
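An end-to-end example already appears in EXAMPLE_DOC_STRING inside the file above. The snippet below only isolates the prompt-amplification trick implemented by `_build_prompt`, rewritten as a hypothetical free function so it can be run on its own.

```py
# Standalone mirror of BlipDiffusionControlNetPipeline._build_prompt: prepend the target
# subject, then repeat the prompt int(prompt_strength * prompt_reps) times to amplify it.
def build_prompt(prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20):
    rv = []
    for prompt, tgt_subject in zip(prompts, tgt_subjects):
        prompt = f"a {tgt_subject} {prompt.strip()}"
        rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps)))
    return rv

print(build_prompt(["on a marble table"], ["teapot"], prompt_strength=0.1))
# ['a teapot on a marble table, a teapot on a marble table']
```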
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np # noqa: E402 from ....configuration_utils import ConfigMixin, register_to_config from ....schedulers.scheduling_utils import SchedulerMixin try: import librosa # noqa: E402 _librosa_can_be_imported = True _import_error = "" except Exception as e: _librosa_can_be_imported = False _import_error = ( f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it." ) from PIL import Image # noqa: E402 class Mel(ConfigMixin, SchedulerMixin): """ Parameters: x_res (`int`): x resolution of spectrogram (time). y_res (`int`): y resolution of spectrogram (frequency bins). sample_rate (`int`): Sample rate of audio. n_fft (`int`): Number of Fast Fourier Transforms. hop_length (`int`): Hop length (a higher number is recommended if `y_res` < 256). top_db (`int`): Loudest decibel value. n_iter (`int`): Number of iterations for Griffin-Lim Mel inversion. """ config_name = "mel_config.json" @register_to_config def __init__( self, x_res: int = 256, y_res: int = 256, sample_rate: int = 22050, n_fft: int = 2048, hop_length: int = 512, top_db: int = 80, n_iter: int = 32, ): self.hop_length = hop_length self.sr = sample_rate self.n_fft = n_fft self.top_db = top_db self.n_iter = n_iter self.set_resolution(x_res, y_res) self.audio = None if not _librosa_can_be_imported: raise ValueError(_import_error) def set_resolution(self, x_res: int, y_res: int): """Set resolution. Args: x_res (`int`): x resolution of spectrogram (time). y_res (`int`): y resolution of spectrogram (frequency bins). """ self.x_res = x_res self.y_res = y_res self.n_mels = self.y_res self.slice_size = self.x_res * self.hop_length - 1 def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None): """Load audio. Args: audio_file (`str`): An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. raw_audio (`np.ndarray`): The raw audio file as a NumPy array. """ if audio_file is not None: self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr) else: self.audio = raw_audio # Pad with silence if necessary. if len(self.audio) < self.x_res * self.hop_length: self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))]) def get_number_of_slices(self) -> int: """Get number of slices in audio. Returns: `int`: Number of spectograms audio can be sliced into. """ return len(self.audio) // self.slice_size def get_audio_slice(self, slice: int = 0) -> np.ndarray: """Get slice of audio. Args: slice (`int`): Slice number of audio (out of `get_number_of_slices()`). Returns: `np.ndarray`: The audio slice as a NumPy array. """ return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)] def get_sample_rate(self) -> int: """Get sample rate. Returns: `int`: Sample rate of audio. """ return self.sr def audio_slice_to_image(self, slice: int) -> Image.Image: """Convert slice of audio to spectrogram. 
Args: slice (`int`): Slice number of audio to convert (out of `get_number_of_slices()`). Returns: `PIL Image`: A grayscale image of `x_res x y_res`. """ S = librosa.feature.melspectrogram( y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels ) log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db) bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8) image = Image.fromarray(bytedata) return image def image_to_audio(self, image: Image.Image) -> np.ndarray: """Converts spectrogram to audio. Args: image (`PIL Image`): A grayscale image of `x_res x y_res`. Returns: audio (`np.ndarray`): The audio as a NumPy array. """ bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width)) log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db S = librosa.db_to_power(log_S) audio = librosa.feature.inverse.mel_to_audio( S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter ) return audio
diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py", "repo_id": "diffusers", "token_count": 2699 }
118
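A small round-trip sketch for the `Mel` helper above. The module path mirrors the file path recorded in this entry, the synthetic sine input is illustrative, and `librosa` must be installed or the constructor raises.

```py
import numpy as np
from diffusers.pipelines.deprecated.audio_diffusion.mel import Mel

mel = Mel(x_res=64, y_res=64, sample_rate=22050, n_fft=2048, hop_length=512)

# Five seconds of a 440 Hz sine; clips shorter than one slice are zero-padded by load_audio.
t = np.linspace(0.0, 5.0, 5 * 22050, dtype=np.float32)
mel.load_audio(raw_audio=np.sin(2 * np.pi * 440.0 * t))

print(mel.get_number_of_slices())     # how many 64-column spectrograms the clip yields
image = mel.audio_slice_to_image(0)   # 64x64 grayscale PIL image
waveform = mel.image_to_audio(image)  # lossy Griffin-Lim reconstruction of that slice
print(image.size, waveform.shape)
```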
from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_dit": ["DiTPipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_dit import DiTPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/dit/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/dit/__init__.py", "repo_id": "diffusers", "token_count": 177 }
119
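Like the AudioLDM entry, this file only registers a lazy import. A hedged sketch of the DiT pipeline it exposes; the checkpoint id, scheduler swap, and label come from the public DiT release and are assumptions rather than part of this file.

```py
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# DiT is class-conditional on ImageNet: map human-readable label names to class ids first.
class_ids = pipe.get_label_ids(["golden retriever"])
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("dit_sample.png")
```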
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from huggingface_hub.utils import validate_hf_hub_args from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort logger = logging.get_logger(__name__) ORT_TO_NP_TYPE = { "tensor(bool)": np.bool_, "tensor(int8)": np.int8, "tensor(uint8)": np.uint8, "tensor(int16)": np.int16, "tensor(uint16)": np.uint16, "tensor(int32)": np.int32, "tensor(uint32)": np.uint32, "tensor(int64)": np.int64, "tensor(uint64)": np.uint64, "tensor(float16)": np.float16, "tensor(float)": np.float32, "tensor(double)": np.float64, } class OnnxRuntimeModel: def __init__(self, model=None, **kwargs): logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.") self.model = model self.model_save_dir = kwargs.get("model_save_dir", None) self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME) def __call__(self, **kwargs): inputs = {k: np.array(v) for k, v in kwargs.items()} return self.model.run(None, inputs) @staticmethod def load_model(path: Union[str, Path], provider=None, sess_options=None): """ Loads an ONNX Inference session with an ExecutionProvider. Default provider is `CPUExecutionProvider` Arguments: path (`str` or `Path`): Directory from which to load provider(`str`, *optional*): Onnxruntime execution provider to use for loading the model, defaults to `CPUExecutionProvider` """ if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider") provider = "CPUExecutionProvider" return ort.InferenceSession(path, providers=[provider], sess_options=sess_options) def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the [`~optimum.onnxruntime.modeling_ort.ORTModel.from_pretrained`] class method. It will always save the latest_model_name. Arguments: save_directory (`str` or `Path`): Directory where to save the model file. file_name(`str`, *optional*): Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to save the model with a different name. 
""" model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME src_path = self.model_save_dir.joinpath(self.latest_model_name) dst_path = Path(save_directory).joinpath(model_file_name) try: shutil.copyfile(src_path, dst_path) except shutil.SameFileError: pass # copy external weights (for models >2GB) src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) if src_path.exists(): dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) try: shutil.copyfile(src_path, dst_path) except shutil.SameFileError: pass def save_pretrained( self, save_directory: Union[str, os.PathLike], **kwargs, ): """ Save a model to a directory, so that it can be re-loaded using the [`~OnnxModel.from_pretrained`] class method.: Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) # saving model weights/files self._save_pretrained(save_directory, **kwargs) @classmethod @validate_hf_hub_args def _from_pretrained( cls, model_id: Union[str, Path], token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs, ): """ Load a model from a directory or the HF Hub. Arguments: model_id (`str` or `Path`): Directory from which to load token (`str` or `bool`): Is needed to load models from a private or gated repository revision (`str`): Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id cache_dir (`Union[str, Path]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. file_name(`str`): Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to load different model files from the same repository or directory. provider(`str`): The ONNX runtime provider, e.g. `CPUExecutionProvider` or `CUDAExecutionProvider`. 
kwargs (`Dict`, *optional*): kwargs will be passed to the model during initialization """ model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(model_id): model = OnnxRuntimeModel.load_model( Path(model_id, model_file_name).as_posix(), provider=provider, sess_options=sess_options ) kwargs["model_save_dir"] = Path(model_id) # load model from hub else: # download model model_cache_path = hf_hub_download( repo_id=model_id, filename=model_file_name, token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, ) kwargs["model_save_dir"] = Path(model_cache_path).parent kwargs["latest_model_name"] = Path(model_cache_path).name model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options) return cls(model=model, **kwargs) @classmethod @validate_hf_hub_args def from_pretrained( cls, model_id: Union[str, Path], force_download: bool = True, token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs, ): revision = None if len(str(model_id).split("@")) == 2: model_id, revision = model_id.split("@") return cls._from_pretrained( model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, token=token, **model_kwargs, )
diffusers/src/diffusers/pipelines/onnx_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/onnx_utils.py", "repo_id": "diffusers", "token_count": 3623 }
120
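A minimal sketch of the loader above, assuming `onnxruntime` is installed and a local export directory that already contains a `model.onnx`; the paths and input names are placeholders, not anything defined in this file.

```py
from diffusers import OnnxRuntimeModel

# Local-directory branch of _from_pretrained: loads <dir>/model.onnx into an ORT session.
model = OnnxRuntimeModel.from_pretrained("./onnx-export/unet", provider="CPUExecutionProvider")

# __call__ casts every keyword argument to a NumPy array and runs the session; the keyword
# names must match the input names baked into the exported graph, e.g. (hypothetical):
# outputs = model(sample=latents, timestep=timestep, encoder_hidden_states=text_embeds)

# Re-saving copies model.onnx (and external weights for >2 GB graphs) to the target directory.
model.save_pretrained("./onnx-export-copy/unet")
```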
# Copyright 2023 Open AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .renderer import ShapERenderer logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> repo = "openai/shap-e-img2img" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png" >>> image = load_image(image_url).convert("RGB") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], "corgi_3d.gif") ``` """ @dataclass class ShapEPipelineOutput(BaseOutput): """ Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`]. Args: images (`torch.FloatTensor`) A list of images for 3D rendering. """ images: Union[PIL.Image.Image, np.ndarray] class ShapEImg2ImgPipeline(DiffusionPipeline): """ Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method from an image. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: prior ([`PriorTransformer`]): The canonincal unCLIP prior to approximate the image embedding from the text embedding. image_encoder ([`~transformers.CLIPVisionModel`]): Frozen image-encoder. image_processor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to process images. scheduler ([`HeunDiscreteScheduler`]): A scheduler to be used in combination with the `prior` model to generate image embedding. shap_e_renderer ([`ShapERenderer`]): Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF rendering method. 
""" model_cpu_offload_seq = "image_encoder->prior" _exclude_from_cpu_offload = ["shap_e_renderer"] def __init__( self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, shap_e_renderer: ShapERenderer, ): super().__init__() self.register_modules( prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, shap_e_renderer=shap_e_renderer, ) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_image( self, image, device, num_images_per_prompt, do_classifier_free_guidance, ): if isinstance(image, List) and isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) if not isinstance(image, torch.Tensor): image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0) image = image.to(dtype=self.image_encoder.dtype, device=device) image_embeds = self.image_encoder(image)["last_hidden_state"] image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: negative_image_embeds = torch.zeros_like(image_embeds) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeds = torch.cat([negative_image_embeds, image_embeds]) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", # pil, np, latent, mesh return_dict: bool = True, ): """ The call function to the pipeline for generation. Args: image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be used as the starting point. Can also accept image latents as image, but if passing latents directly it is not encoded again. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents tensor is generated by sampling using the supplied random `generator`. guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. frame_size (`int`, *optional*, default to 64): The width and height of each image frame of the generated 3D output. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, torch.Tensor): batch_size = image.shape[0] elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)): batch_size = len(image) else: raise ValueError( f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}" ) device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) # prior self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_embeddings = self.prior.config.num_embeddings embedding_dim = self.prior.config.embedding_dim latents = self.prepare_latents( (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim) for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.prior( scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding # remove the variance noise_pred, _ = noise_pred.split( scaled_model_input.shape[2], dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance: noise_pred_uncond, noise_pred = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) latents = self.scheduler.step( noise_pred, timestep=t, sample=latents, ).prev_sample if output_type not in ["np", "pil", "latent", "mesh"]: raise ValueError( f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}" ) # Offload all models self.maybe_free_model_hooks() if output_type == "latent": return ShapEPipelineOutput(images=latents) images = [] if output_type == "mesh": for i, latent in enumerate(latents): mesh = self.shap_e_renderer.decode_to_mesh( latent[None, :], 
device, ) images.append(mesh) else: # np, pil for i, latent in enumerate(latents): image = self.shap_e_renderer.decode_to_image( latent[None, :], device, size=frame_size, ) images.append(image) images = torch.stack(images) images = images.cpu().numpy() if output_type == "pil": images = [self.numpy_to_pil(image) for image in images] if not return_dict: return (images,) return ShapEPipelineOutput(images=images)
diffusers/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py", "repo_id": "diffusers", "token_count": 5666 }
121
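The EXAMPLE_DOC_STRING in the file above already covers the gif workflow; the sketch below exercises the output_type="mesh" branch instead and writes a `.ply` file. It reuses the checkpoint and image URL from that docstring and assumes `export_to_ply` is available in `diffusers.utils`.

```py
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import export_to_ply, load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")

image = load_image("https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png").convert("RGB")

# output_type="mesh" makes the renderer decode latents to meshes instead of image frames.
mesh = pipe(image, guidance_scale=3.0, num_inference_steps=64, output_type="mesh").images[0]
export_to_ply(mesh, "corgi_3d.ply")
```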
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ...configuration_utils import FrozenDict from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from . import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name class StableDiffusionImageVariationPipeline(DiffusionPipeline): r""" Pipeline to generate image variations from an input image using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ # TODO: feature_extractor is required to encode images (if they are in PIL format), # we should give a descriptive message if the pipeline doesn't have one. 
_optional_components = ["safety_checker"] model_cpu_offload_seq = "image_encoder->unet->vae" _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, image_encoder: CLIPVisionModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warn( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, image, height, width, callback_steps): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stages where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
""" if not hasattr(self, "unet"): raise ValueError("The pipeline must have `unet` for using FreeU.") self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu def disable_freeu(self): """Disables the FreeU mechanism if enabled.""" self.unet.disable_freeu() @torch.no_grad() def __call__( self, image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): Image or images to guide image generation. If you provide a tensor, it needs to be compatible with [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. 
If not specified, the callback is called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. Examples: ```py from diffusers import StableDiffusionImageVariationPipeline from PIL import Image from io import BytesIO import requests pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", revision="v2.0" ) pipe = pipe.to("cuda") url = "https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200" response = requests.get(url) image = Image.open(BytesIO(response.content)).convert("RGB") out = pipe(image, num_images_per_prompt=3, guidance_scale=15) out["images"][0].save("result.jpg") ``` """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(image, height, width, callback_steps) # 2. Define call parameters if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input image image_embeddings = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) self.maybe_free_model_hooks() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
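The `__call__` above exposes a `callback` hook that fires every `callback_steps` scheduler steps. Building on the checkpoint already used in the docstring example, the sketch below (illustrative only, not part of the file) shows how intermediate latents can be inspected through that hook; the input URL is a placeholder.

```py
# Illustrative sketch: inspecting intermediate latents through `callback` / `callback_steps`.
import torch
from diffusers import StableDiffusionImageVariationPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "lambdalabs/sd-image-variations-diffusers", revision="v2.0"
).to("cuda")

image = load_image("https://example.com/input.jpg")  # placeholder input image URL


def log_latents(step: int, timestep: int, latents: torch.FloatTensor):
    # Called every `callback_steps` steps; here we only record latent statistics.
    print(f"step={step} t={int(timestep)} latent_std={latents.std().item():.3f}")


out = pipe(image, num_inference_steps=25, guidance_scale=7.5, callback=log_latents, callback_steps=5)
out.images[0].save("variation.png")
```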
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py", "repo_id": "diffusers", "token_count": 9796 }
122
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from transformers import CLIPTokenizer, FlaxCLIPTextModel from diffusers.utils import logging from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import ( FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, ) from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Set to True to use python for loop instead of jax.fori_loop for easier debugging DEBUG = False class FlaxStableDiffusionXLPipeline(FlaxDiffusionPipeline): def __init__( self, text_encoder: FlaxCLIPTextModel, text_encoder_2: FlaxCLIPTextModel, vae: FlaxAutoencoderKL, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler ], dtype: jnp.dtype = jnp.float32, ): super().__init__() self.dtype = dtype self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") # Assume we have the two encoders inputs = [] for tokenizer in [self.tokenizer, self.tokenizer_2]: text_inputs = tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) inputs.append(text_inputs.input_ids) inputs = jnp.stack(inputs, axis=1) return inputs def __call__( self, prompt_ids: jax.Array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int = 50, guidance_scale: Union[float, jax.Array] = 7.5, height: Optional[int] = None, width: Optional[int] = None, latents: jnp.array = None, neg_prompt_ids: jnp.array = None, return_dict: bool = True, output_type: str = None, jit: bool = False, ): # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float) and jit: # Convert to a tensor so each device gets a copy. 
guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) guidance_scale = guidance_scale[:, None] return_latents = output_type == "latent" if jit: images = _p_generate( self, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ) else: images = self._generate( prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ) if not return_dict: return (images,) return FlaxStableDiffusionXLPipelineOutput(images=images) def get_embeddings(self, prompt_ids: jnp.array, params): # We assume we have the two encoders # bs, encoder_input, seq_length te_1_inputs = prompt_ids[:, 0, :] te_2_inputs = prompt_ids[:, 1, :] prompt_embeds = self.text_encoder(te_1_inputs, params=params["text_encoder"], output_hidden_states=True) prompt_embeds = prompt_embeds["hidden_states"][-2] prompt_embeds_2_out = self.text_encoder_2( te_2_inputs, params=params["text_encoder_2"], output_hidden_states=True ) prompt_embeds_2 = prompt_embeds_2_out["hidden_states"][-2] text_embeds = prompt_embeds_2_out["text_embeds"] prompt_embeds = jnp.concatenate([prompt_embeds, prompt_embeds_2], axis=-1) return prompt_embeds, text_embeds def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, bs, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = jnp.array([add_time_ids] * bs, dtype=dtype) return add_time_ids def _generate( self, prompt_ids: jnp.array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, height: int, width: int, guidance_scale: float, latents: Optional[jnp.array] = None, neg_prompt_ids: Optional[jnp.array] = None, return_latents=False, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # Encode input prompt prompt_embeds, pooled_embeds = self.get_embeddings(prompt_ids, params) # Get unconditional embeddings batch_size = prompt_embeds.shape[0] if neg_prompt_ids is None: neg_prompt_embeds = jnp.zeros_like(prompt_embeds) negative_pooled_embeds = jnp.zeros_like(pooled_embeds) else: neg_prompt_embeds, negative_pooled_embeds = self.get_embeddings(neg_prompt_ids, params) add_time_ids = self._get_add_time_ids( (height, width), (0, 0), (height, width), prompt_embeds.shape[0], dtype=prompt_embeds.dtype ) prompt_embeds = jnp.concatenate([neg_prompt_embeds, prompt_embeds], axis=0) # (2, 77, 2048) add_text_embeds = jnp.concatenate([negative_pooled_embeds, pooled_embeds], axis=0) add_time_ids = jnp.concatenate([add_time_ids, add_time_ids], axis=0) # Ensure model output will be `float32` before going into the scheduler guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) # Create random latents latents_shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") # Prepare scheduler state scheduler_state = self.scheduler.set_timesteps( params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape ) # scale the initial noise by the standard deviation required by the scheduler latents = latents * scheduler_state.init_noise_sigma added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": 
add_time_ids} # Denoising loop def loop_body(step, args): latents, scheduler_state = args # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) # predict the noise residual noise_pred = self.unet.apply( {"params": params["unet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return latents, scheduler_state if DEBUG: # run with python for loop for i in range(num_inference_steps): latents, scheduler_state = loop_body(i, (latents, scheduler_state)) else: latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) if return_latents: return latents # Decode latents latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image # Static argnums are pipe, num_inference_steps, height, width, return_latents. A change would trigger recompilation. # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). @partial( jax.pmap, in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0, None), static_broadcasted_argnums=(0, 4, 5, 6, 10), ) def _p_generate( pipe, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ): return pipe._generate( prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, )
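Because `_p_generate` is `jax.pmap`-ed with the `in_axes` and `static_broadcasted_argnums` shown above, calling the pipeline with `jit=True` expects the parameters replicated once per device and the inputs sharded over the leading device axis. The sketch below is a hedged illustration of that calling convention; the checkpoint name and the availability of Flax SDXL weights for it are assumptions, not something this file guarantees.

```py
# Illustrative sketch of the pmap calling convention used by `_p_generate` above.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionXLPipeline

# Hypothetical checkpoint: requires a repo that actually ships Flax weights.
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", dtype=jnp.bfloat16
)

num_devices = jax.device_count()
prompt = ["a photo of an astronaut riding a horse"] * num_devices
prompt_ids = pipeline.prepare_inputs(prompt)  # (batch, 2 tokenizers, seq_len)
prompt_ids = shard(prompt_ids)                # (devices, batch / devices, 2, seq_len)

params = replicate(params)                    # one copy of the weights per device
rng = jax.random.split(jax.random.PRNGKey(0), num_devices)

images = pipeline(prompt_ids, params, rng, num_inference_steps=25, jit=True).images
images = images.reshape((-1,) + images.shape[-3:])  # flatten the device axis
```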
diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 5251 }
123
import copy import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from ..stable_diffusion import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. """ def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. 
For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. """ def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) processor = ( CrossFrameAttnProcessor2_0(batch_size=2) if hasattr(F, "scaled_dot_product_attention") else CrossFrameAttnProcessor(batch_size=2) ) self.unet.set_attn_processor(processor) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def forward_loop(self, x_t0, t0, t1, generator): """ Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. Args: x_t0: Latent code at time t0. t0: Timestep at t0. t1: Timestamp at t1. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. Returns: x_t1: Forward process applied to x_t0 from time t0 to t1. """ eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps return x_t1 def backward_loop( self, latents, timesteps, prompt_embeds, guidance_scale, callback, callback_steps, num_warmup_steps, extra_step_kwargs, cross_attention_kwargs=None, ): """ Perform backward process given list of time steps. Args: latents: Latents at time timesteps[0]. timesteps: Time steps along which to perform backward process. prompt_embeds: Pre-generated text embeddings. guidance_scale: A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. extra_step_kwargs: Extra_step_kwargs. cross_attention_kwargs: A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). num_warmup_steps: number of warmup steps. Returns: latents: Latents of backward process output at time timesteps[-1]. """ do_classifier_free_guidance = guidance_scale > 1.0 num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order with self.progress_bar(total=num_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) return latents.clone().detach() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int] = 8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, motion_field_strength_x: float = 12, motion_field_strength_y: float = 12, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, t0: int = 44, t1: int = 47, frame_ids: Optional[List[int]] = None, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. video_length (`int`, *optional*, defaults to 8): The number of generated video frames. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what not to include in video generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"tensor"`): The output format of the generated video. Choose `"latent"` to return latent codes; any other value returns a decoded NumPy array. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. motion_field_strength_y (`float`, *optional*, defaults to 12): Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. t0 (`int`, *optional*, defaults to 44): Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. t1 (`int`, *optional*, defaults to 47): Timestep t1. Should be in the range [t0 + 1, num_inference_steps - 1]. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. frame_ids (`List[int]`, *optional*): Indexes of the frames that are being generated. This is used when generating longer videos chunk-by-chunk. Returns: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]: The output contains an `ndarray` of the generated video, when `output_type` != `"latent"`, otherwise a latent code of generated videos and a list of `bool`s indicating whether the corresponding generated video contains "not-safe-for-work" (nsfw) content. """ assert video_length > 0 if frame_ids is None: frame_ids = list(range(video_length)) assert len(frame_ids) == video_length assert num_videos_per_prompt == 1 if isinstance(prompt, str): prompt = [prompt] if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt_embeds_tuple = self.encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # Perform the first backward process up to time T_1 x_1_t1 = self.backward_loop( timesteps=timesteps[: -t1 - 1], prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=num_warmup_steps, ) scheduler_copy = copy.deepcopy(self.scheduler) # Perform the second backward process up to time T_0 x_1_t0 = self.backward_loop( timesteps=timesteps[-t1 - 1 : -t0 - 1], prompt_embeds=prompt_embeds, latents=x_1_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, ) # Propagate first frame latents at time T_0 to remaining frames x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) # Add motion in latents at time T_0 x_2k_t0 = create_motion_field_and_warp_latents( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_2k_t0, frame_ids=frame_ids[1:], ) # Perform forward process up to time T_1 x_2k_t1 = self.forward_loop( x_t0=x_2k_t0, t0=timesteps[-t0 - 1].item(), t1=timesteps[-t1 - 1].item(), generator=generator, ) # Perform backward process from time T_1 to 0 x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) b, l, d = prompt_embeds.size() prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) self.scheduler = scheduler_copy x_1k_0 = self.backward_loop( timesteps=timesteps[-t1 - 1 :], prompt_embeds=prompt_embeds, latents=x_1k_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, ) latents = x_1k_0 # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") torch.cuda.empty_cache() if output_type == "latent": image = latents has_nsfw_concept = None else: image = self.decode_latents(latents) # Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image
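The file above implements the Text2Video-Zero sampling scheme on top of a plain Stable Diffusion checkpoint: two backward passes split at T_1 and T_0, motion-field warping of the first-frame latents, then a joint backward pass over all frames. A minimal usage sketch; the checkpoint id follows the upstream docs, and the `imageio` post-processing is illustrative rather than part of the pipeline:

```py
import imageio
import torch

from diffusers import TextToVideoZeroPipeline

# Text2Video-Zero needs no video-specific weights; a standard SD checkpoint is enough.
pipe = TextToVideoZeroPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

prompt = "A panda is playing guitar on times square"
# motion_field_strength_x/y control the latent warping performed by
# create_motion_field_and_warp_latents above.
result = pipe(
    prompt=prompt,
    video_length=8,
    motion_field_strength_x=12,
    motion_field_strength_y=12,
).images

frames = [(frame * 255).astype("uint8") for frame in result]
imageio.mimsave("video.mp4", frames, fps=4)
```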
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py", "repo_id": "diffusers", "token_count": 19885 }
124
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...schedulers import DDPMWuerstchenScheduler from ...utils import deprecate, replace_example_docstring from ..pipeline_utils import DiffusionPipeline from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt from .modeling_wuerstchen_prior import WuerstchenPrior from .pipeline_wuerstchen import WuerstchenDecoderPipeline from .pipeline_wuerstchen_prior import WuerstchenPriorPipeline TEXT2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusions import WuerstchenCombinedPipeline >>> pipe = WuerstchenCombinedPipeline.from_pretrained("warp-ai/Wuerstchen", torch_dtype=torch.float16).to( ... "cuda" ... ) >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" >>> images = pipe(prompt=prompt) ``` """ class WuerstchenCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for text-to-image generation using Wuerstchen This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer (`CLIPTokenizer`): The decoder tokenizer to be used for text inputs. text_encoder (`CLIPTextModel`): The decoder text encoder to be used for text inputs. decoder (`WuerstchenDiffNeXt`): The decoder model to be used for decoder image generation pipeline. scheduler (`DDPMWuerstchenScheduler`): The scheduler to be used for decoder image generation pipeline. vqgan (`PaellaVQModel`): The VQGAN model to be used for decoder image generation pipeline. prior_tokenizer (`CLIPTokenizer`): The prior tokenizer to be used for text inputs. prior_text_encoder (`CLIPTextModel`): The prior text encoder to be used for text inputs. prior_prior (`WuerstchenPrior`): The prior model to be used for prior pipeline. prior_scheduler (`DDPMWuerstchenScheduler`): The scheduler to be used for prior pipeline. 
""" _load_connected_pipes = True def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: WuerstchenDiffNeXt, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, prior_tokenizer: CLIPTokenizer, prior_text_encoder: CLIPTextModel, prior_prior: WuerstchenPrior, prior_scheduler: DDPMWuerstchenScheduler, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqgan, prior_prior=prior_prior, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, ) self.prior_pipe = WuerstchenPriorPipeline( prior=prior_prior, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, ) self.decoder_pipe = WuerstchenDecoderPipeline( text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqgan, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id) def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. 
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, prior_num_inference_steps: int = 60, prior_timesteps: Optional[List[float]] = None, prior_guidance_scale: float = 4.0, num_inference_steps: int = 12, decoder_timesteps: Optional[List[float]] = None, decoder_guidance_scale: float = 0.0, negative_prompt: Optional[Union[str, List[str]]] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation for the prior and decoder. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `prior_guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `prior_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`Union[int, Dict[float, int]]`, *optional*, defaults to 60): The number of prior denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
For more specific timestep spacing, you can pass customized `prior_timesteps` num_inference_steps (`int`, *optional*, defaults to 12): The number of decoder denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. For more specific timestep spacing, you can pass customized `timesteps` prior_timesteps (`List[float]`, *optional*): Custom timesteps to use for the denoising process for the prior. If not defined, equal spaced `prior_num_inference_steps` timesteps are used. Must be in descending order. decoder_timesteps (`List[float]`, *optional*): Custom timesteps to use for the denoising process for the decoder. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. decoder_guidance_scale (`float`, *optional*, defaults to 0.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. prior_callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. prior_callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. 
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ prior_kwargs = {} if kwargs.get("prior_callback", None) is not None: prior_kwargs["callback"] = kwargs.pop("prior_callback") deprecate( "prior_callback", "1.0.0", "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", ) if kwargs.get("prior_callback_steps", None) is not None: deprecate( "prior_callback_steps", "1.0.0", "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", ) prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps") prior_outputs = self.prior_pipe( prompt=prompt if prompt_embeds is None else None, height=height, width=width, num_inference_steps=prior_num_inference_steps, timesteps=prior_timesteps, guidance_scale=prior_guidance_scale, negative_prompt=negative_prompt if negative_prompt_embeds is None else None, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, output_type="pt", return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, **prior_kwargs, ) image_embeddings = prior_outputs[0] outputs = self.decoder_pipe( image_embeddings=image_embeddings, prompt=prompt if prompt is not None else "", num_inference_steps=num_inference_steps, timesteps=decoder_timesteps, guidance_scale=decoder_guidance_scale, negative_prompt=negative_prompt, generator=generator, output_type=output_type, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) return outputs
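A usage sketch for the combined pipeline above, mirroring the example docstring (note the import comes from `diffusers`); resolution and guidance values are illustrative defaults:

```py
import torch

from diffusers import WuerstchenCombinedPipeline

pipe = WuerstchenCombinedPipeline.from_pretrained(
    "warp-ai/Wuerstchen", torch_dtype=torch.float16
).to("cuda")

# The prior stage uses prior_guidance_scale; the decoder stage runs with
# decoder_guidance_scale (0.0 disables classifier-free guidance there).
images = pipe(
    prompt="an image of a shiba inu, donning a spacesuit and helmet",
    height=1024,
    width=1024,
    prior_guidance_scale=4.0,
    decoder_guidance_scale=0.0,
    num_images_per_prompt=1,
).images
```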
diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py", "repo_id": "diffusers", "token_count": 6899 }
125
# Copyright 2023 UC Berkeley Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class DDPMSchedulerState: common: CommonSchedulerState # setable values init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None @classmethod def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps) @dataclass class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput): state: DDPMSchedulerState class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin): """ Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and Langevin dynamics sampling. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details, see the original paper: https://arxiv.org/abs/2006.11239 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. variance_type (`str`): options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. clip_sample (`bool`, default `True`): option to clip predicted sample between -1 and 1 for numerical stability. prediction_type (`str`, default `epsilon`): indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`. `v-prediction` is not supported for this scheduler. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): the `dtype` used for params and computation. 
""" _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState: if common is None: common = CommonSchedulerState.create(self) # standard deviation of the initial noise distribution init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return DDPMSchedulerState.create( common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) def scale_model_input( self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None ) -> jnp.ndarray: """ Args: state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. sample (`jnp.ndarray`): input sample timestep (`int`, optional): current timestep Returns: `jnp.ndarray`: scaled input sample """ return sample def set_timesteps( self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> DDPMSchedulerState: """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`DDIMSchedulerState`): the `FlaxDDPMScheduler` state data class instance. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ step_ratio = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] return state.replace( num_inference_steps=num_inference_steps, timesteps=timesteps, ) def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None): alpha_prod_t = state.common.alphas_cumprod[t] alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: variance_type = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": variance = jnp.clip(variance, a_min=1e-20) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = jnp.log(jnp.clip(variance, a_min=1e-20)) elif variance_type == "fixed_large": variance = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log variance = jnp.log(state.common.betas[t]) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": min_log = variance max_log = state.common.betas[t] frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def step( self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.Array] = None, return_dict: bool = True, ) 
-> Union[FlaxDDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`DDPMSchedulerState`): the `FlaxDDPMScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. key (`jax.Array`): a PRNG key. return_dict (`bool`): option for returning tuple rather than FlaxDDPMSchedulerOutput class Returns: [`FlaxDDPMSchedulerOutput`] or `tuple`: [`FlaxDDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep if key is None: key = jax.random.PRNGKey(0) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1) else: predicted_variance = None # 1. compute alphas, betas alpha_prod_t = state.common.alphas_cumprod[t] alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` " " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: pred_original_sample = jnp.clip(pred_original_sample, -1, 1) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * state.common.betas[t]) / beta_prod_t current_sample_coeff = state.common.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): split_key = jax.random.split(key, num=1) noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype) return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype)) pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state) def add_noise( self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def get_velocity( self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray: return get_velocity_common(state.common, sample, noise, timesteps) def __len__(self): return self.config.num_train_timesteps
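A minimal Flax denoising-loop sketch for the scheduler above; the zero `model_output` is a placeholder for a Flax UNet's epsilon prediction and the latent shape is illustrative:

```py
import jax
import jax.numpy as jnp

from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 4, 64, 64)) * state.init_noise_sigma

for t in state.timesteps:
    model_output = jnp.zeros_like(sample)  # placeholder for a Flax UNet prediction
    key, step_key = jax.random.split(key)
    # step() returns both the denoised sample and the scheduler state.
    out = scheduler.step(state, model_output, t, sample, key=step_key)
    sample, state = out.prev_sample, out.state
```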
diffusers/src/diffusers/schedulers/scheduling_ddpm_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddpm_flax.py", "repo_id": "diffusers", "token_count": 5237 }
126
# Copyright 2023 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class KarrasVeSchedulerState: # setable values num_inference_steps: Optional[int] = None timesteps: Optional[jnp.ndarray] = None schedule: Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def create(cls): return cls() @dataclass class FlaxKarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. """ prev_sample: jnp.ndarray derivative: jnp.ndarray state: KarrasVeSchedulerState class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): """ Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and the VE column of Table 1 from [1] for reference. [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations." https://arxiv.org/abs/2011.13456 [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. Args: sigma_min (`float`): minimum noise magnitude sigma_max (`float`): maximum noise magnitude s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`): the parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`): the end value of the sigma range where we add noise. A reasonable range is [0.2, 80]. 
""" @property def has_state(self): return True @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): pass def create_state(self): return KarrasVeSchedulerState.create() def set_timesteps( self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> KarrasVeSchedulerState: """ Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, ) def add_noise_to_input( self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: jax.Array, ) -> Tuple[jnp.ndarray, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. TODO Args: """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) key = random.split(key, num=1) eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def step_correct( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Correct the predicted sample based on the output model_output of the network. TODO complete description Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO sample_prev (`torch.FloatTensor` or `np.ndarray`): TODO derivative (`torch.FloatTensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py", "repo_id": "diffusers", "token_count": 3955 }
127
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from packaging import version from .. import __version__ from .constants import ( CONFIG_NAME, DEPRECATED_REVISION_ARGS, DIFFUSERS_DYNAMIC_MODULE_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MIN_PEFT_VERSION, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, SAFETENSORS_FILE_EXTENSION, SAFETENSORS_WEIGHTS_NAME, USE_PEFT_BACKEND, WEIGHTS_NAME, ) from .deprecation_utils import deprecate from .doc_utils import replace_example_docstring from .dynamic_modules_utils import get_class_from_dynamic_module from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video from .hub_utils import ( PushToHubMixin, _add_variant, _get_model_file, extract_commit_hash, http_user_agent, ) from .import_utils import ( BACKENDS_MAPPING, DIFFUSERS_SLOW_IMPORT, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_accelerate_available, is_accelerate_version, is_bs4_available, is_flax_available, is_ftfy_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_peft_available, is_scipy_available, is_tensorboard_available, is_torch_available, is_torch_version, is_torch_xla_available, is_torchsde_available, is_torchvision_available, is_transformers_available, is_transformers_version, is_unidecode_available, is_wandb_available, is_xformers_available, requires_backends, ) from .loading_utils import load_image from .logging import get_logger from .outputs import BaseOutput from .peft_utils import ( check_peft_version, delete_adapter_layers, get_adapter_name, get_peft_kwargs, recurse_remove_peft_layers, scale_lora_layers, set_adapter_layers, set_weights_and_activate_adapters, unscale_lora_layers, ) from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil from .state_dict_utils import ( convert_all_state_dict_to_peft, convert_state_dict_to_diffusers, convert_state_dict_to_kohya, convert_state_dict_to_peft, convert_unet_state_dict_to_peft, ) logger = get_logger(__name__) def check_min_version(min_version): if version.parse(__version__) < version.parse(min_version): if "dev" in min_version: error_message = ( "This example requires a source install from HuggingFace diffusers (see " "`https://huggingface.co/docs/diffusers/installation#install-from-source`)," ) else: error_message = f"This example requires a minimum version of {min_version}," error_message += f" but the version found is {__version__}.\n" raise ImportError(error_message)
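`check_min_version` is what the example and training scripts call right after their imports so that an outdated install fails loudly instead of producing confusing errors later. A typical usage sketch (the version string is illustrative):

```py
from diffusers.utils import check_min_version

# Raises ImportError if the installed diffusers is older than the requested version;
# a ".dev" requirement additionally hints that a source install is expected.
check_min_version("0.25.0.dev0")
```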
diffusers/src/diffusers/utils/__init__.py/0
{ "file_path": "diffusers/src/diffusers/utils/__init__.py", "repo_id": "diffusers", "token_count": 1483 }
128
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class SpectrogramDiffusionPipeline(metaclass=DummyObject): _backends = ["transformers", "torch", "note_seq"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers", "torch", "note_seq"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["transformers", "torch", "note_seq"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["transformers", "torch", "note_seq"])
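These autogenerated dummies keep `import diffusers` working when optional backends are missing and defer the failure to the point of use. A small sketch of that behaviour, assuming an environment where `note_seq` (or another listed backend) is not installed:

```py
# Importing the dummy directly always succeeds; instantiating it triggers requires_backends.
from diffusers.utils.dummy_transformers_and_torch_and_note_seq_objects import (
    SpectrogramDiffusionPipeline,
)

try:
    SpectrogramDiffusionPipeline()
except ImportError as err:
    print(err)  # tells the user to install transformers, torch and note_seq
```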
diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py", "repo_id": "diffusers", "token_count": 236 }
129
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class CustomLocalPipeline(DiffusionPipeline): r""" This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Parameters: unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[torch.Generator] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]: r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. eta (`float`, *optional*, defaults to 0.0): The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. """ # Sample gaussian noise to begin loop image = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, ) image = image.to(self.device) # set step values self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output model_output = self.unet(image, t).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 image = self.scheduler.step(model_output, t, image).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=image), "This is a local test"
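This fixture is loaded through the `custom_pipeline` argument of `DiffusionPipeline.from_pretrained`. A usage sketch; the local path points at the directory holding this `pipeline.py`, and the DDPM checkpoint provides the `unet`/`scheduler` the custom class expects:

```py
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="./tests/fixtures/custom_pipeline"
)

# The custom __call__ returns an extra string on top of the usual pipeline output.
output, message = pipe(num_inference_steps=50, output_type="pil")
print(message)  # "This is a local test"
images = output.images
```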
diffusers/tests/fixtures/custom_pipeline/pipeline.py/0
{ "file_path": "diffusers/tests/fixtures/custom_pipeline/pipeline.py", "repo_id": "diffusers", "token_count": 1739 }
130
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from diffusers import UNet2DConditionModel from diffusers.training_utils import EMAModel from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device enable_full_determinism() class EMAModelTests(unittest.TestCase): model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" batch_size = 1 prompt_length = 77 text_encoder_hidden_dim = 32 num_in_channels = 4 latent_height = latent_width = 64 generator = torch.manual_seed(0) def get_models(self, decay=0.9999): unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") unet = unet.to(torch_device) ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config) return unet, ema_unet def get_dummy_inputs(self): noisy_latents = torch.randn( self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator ).to(torch_device) timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) encoder_hidden_states = torch.randn( self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator ).to(torch_device) return noisy_latents, timesteps, encoder_hidden_states def simulate_backprop(self, unet): updated_state_dict = {} for k, param in unet.state_dict().items(): updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) updated_state_dict.update({k: updated_param}) unet.load_state_dict(updated_state_dict) return unet def test_optimization_steps_updated(self): unet, ema_unet = self.get_models() # Take the first (hypothetical) EMA step. ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 1 # Take two more. for _ in range(2): ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 3 def test_shadow_params_not_updated(self): unet, ema_unet = self.get_models() # Since the `unet` is not being updated (i.e., backprop'd) # there won't be any difference between the `params` of `unet` # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. ema_unet.step(unet.parameters()) orig_params = list(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) # The above holds true even if we call `ema.step()` multiple times since # `unet` params are still not being updated. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) def test_shadow_params_updated(self): unet, ema_unet = self.get_models() # Here we simulate the parameter updates for `unet`. Since there might # be some parameters which are initialized to zero we take extra care to # initialize their values to something non-zero before the multiplication. unet_pseudo_updated_step_one = self.simulate_backprop(unet) # Take the EMA step. 
ema_unet.step(unet_pseudo_updated_step_one.parameters()) # Now the EMA'd parameters won't be equal to the original model parameters. orig_params = list(unet_pseudo_updated_step_one.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) # Ensure this is the case when we take multiple EMA steps. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) def test_consecutive_shadow_params_updated(self): # If we call EMA step after a backpropagation consecutively for two times, # the shadow params from those two steps should be different. unet, ema_unet = self.get_models() # First backprop + EMA unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params # Second backprop + EMA unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert ~torch.allclose(step_one, step_two) def test_zero_decay(self): # If there's no decay even if there are backprops, EMA steps # won't take any effect i.e., the shadow params would remain the # same. unet, ema_unet = self.get_models(decay=0.0) unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert torch.allclose(step_one, step_two) @skip_mps def test_serialization(self): unet, ema_unet = self.get_models() noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) loaded_unet = loaded_unet.to(unet.device) # Since no EMA step has been performed the outputs should match. output = unet(noisy_latents, timesteps, encoder_hidden_states).sample output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample assert torch.allclose(output, output_loaded, atol=1e-4)
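The tests above exercise `EMAModel` the way the training scripts use it: step the shadow copy after every optimizer update, then copy the averaged weights back for evaluation or saving. A condensed sketch under those assumptions (the training step itself is elided):

```py
from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel

unet = UNet2DConditionModel.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-pipe", subfolder="unet"
)
ema_unet = EMAModel(
    unet.parameters(), decay=0.9999, model_cls=UNet2DConditionModel, model_config=unet.config
)

num_training_steps = 10  # placeholder; real scripts derive this from the dataloader
for _ in range(num_training_steps):
    # ... forward pass, loss.backward(), optimizer.step(), optimizer.zero_grad() ...
    ema_unet.step(unet.parameters())  # update shadow params after each optimizer update

# Copy the EMA weights into the live model before evaluation or saving.
ema_unet.copy_to(unet.parameters())
```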
diffusers/tests/others/test_ema.py/0
{ "file_path": "diffusers/tests/others/test_ema.py", "repo_id": "diffusers", "token_count": 2816 }
131
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DanceDiffusionPipeline params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet1DModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), ) scheduler = IPNDMScheduler() components = { "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = DanceDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test 
super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dance_diffusion(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 def test_dance_diffusion_fp16(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
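For reference, the integration tests above correspond to the following end-user call; the checkpoint and audio length mirror the test values, and writing the result to disk with `scipy` is illustrative:

```py
import torch
from scipy.io.wavfile import write as write_wav

from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained(
    "harmonai/maestro-150k", torch_dtype=torch.float16
).to("cuda")

output = pipe(num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]  # numpy array of shape (channels, samples)

# The sample rate comes from the UNet config (16 kHz for this checkpoint).
write_wav("maestro_sample.wav", pipe.unet.config.sample_rate, audio.T)
```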
diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py", "repo_id": "diffusers", "token_count": 2423 }
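# --- Illustrative sketch (not part of the dataset record above) ---
# Standalone usage of DanceDiffusionPipeline, reconstructed from the nightly integration test:
# the checkpoint id, generator seed, step count and audio length are copied from the test;
# the device placement and how the waveform is consumed afterwards are assumptions.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda")  # assumes a CUDA device, as in the @require_torch_gpu test

generator = torch.manual_seed(0)
output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

# `output.audios` is a numpy array of shape (batch, channels, samples),
# here (1, 2, pipe.unet.sample_size) as asserted in the test above
audio = output.audios[0]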
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, ) from diffusers import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0 from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class IPAdapterNightlyTestsMixin(unittest.TestCase): dtype = torch.float16 def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_image_encoder(self, repo_id, subfolder): image_encoder = CLIPVisionModelWithProjection.from_pretrained( repo_id, subfolder=subfolder, torch_dtype=self.dtype ).to(torch_device) return image_encoder def get_image_processor(self, repo_id): image_processor = CLIPImageProcessor.from_pretrained(repo_id) return image_processor def get_dummy_inputs(self, for_image_to_image=False, for_inpainting=False, for_sdxl=False): image = load_image( "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png" ) if for_sdxl: image = image.resize((1024, 1024)) input_kwargs = { "prompt": "best quality, high quality", "negative_prompt": "monochrome, lowres, bad anatomy, worst quality, low quality", "num_inference_steps": 5, "generator": torch.Generator(device="cpu").manual_seed(33), "ip_adapter_image": image, "output_type": "np", } if for_image_to_image: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") if for_sdxl: image = image.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "ip_adapter_image": ip_image}) elif for_inpainting: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") if for_sdxl: image = image.resize((1024, 1024)) mask = mask.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image}) return input_kwargs @slow @require_torch_gpu class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, 
safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.80810547, 0.88183594, 0.9296875, 0.9189453, 0.9848633, 1.0, 0.97021484, 1.0, 1.0]) assert np.allclose(image_slice, expected_slice, atol=1e-3) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.30444336, 0.26513672, 0.22436523, 0.2758789, 0.25585938, 0.20751953, 0.25390625, 0.24633789, 0.21923828] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_image_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.22167969, 0.21875, 0.21728516, 0.22607422, 0.21948242, 0.23925781, 0.22387695, 0.25268555, 0.2722168] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.35913086, 0.265625, 0.26367188, 0.24658203, 0.19750977, 0.39990234, 0.15258789, 0.20336914, 0.5517578] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_inpainting(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.27148438, 0.24047852, 0.22167969, 0.23217773, 0.21118164, 0.21142578, 0.21875, 0.20751953, 0.20019531] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.27294922, 0.24023438, 0.21948242, 0.23242188, 0.20825195, 0.2055664, 0.21679688, 0.20336914, 0.19360352] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_text_to_image_model_cpu_offload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) 
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.to(torch_device) inputs = self.get_dummy_inputs() output_without_offload = pipeline(**inputs).images pipeline.enable_model_cpu_offload() inputs = self.get_dummy_inputs() output_with_offload = pipeline(**inputs).images max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-3, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipeline.components.items() if isinstance(v, torch.nn.Module) and k not in pipeline._exclude_from_cpu_offload ] ( self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) def test_text_to_image_full_face(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin") pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.18115234, 0.13500977, 0.13427734, 0.24194336, 0.17138672, 0.16625977, 0.4260254, 0.43359375, 0.4416504] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_unload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.set_ip_adapter_scale(0.7) pipeline.unload_ip_adapter() assert getattr(pipeline, "image_encoder") is None assert getattr(pipeline, "feature_extractor") is None processors = [ isinstance(attn_proc, (AttnProcessor, AttnProcessor2_0)) for name, attn_proc in pipeline.unet.attn_processors.items() ] assert processors == [True] * len(processors) def test_multi(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="models", weight_name=["ip-adapter_sd15.bin", "ip-adapter-plus_sd15.bin"] ) pipeline.set_ip_adapter_scale([0.7, 0.3]) inputs = self.get_dummy_inputs() ip_adapter_image = inputs["ip_adapter_image"] inputs["ip_adapter_image"] = [ip_adapter_image, [ip_adapter_image] * 2] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.5234375, 0.53515625, 0.5629883, 0.57128906, 0.59521484, 0.62109375, 0.57910156, 0.6201172, 0.6508789] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) @slow @require_torch_gpu class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline 
= StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.09630299, 0.09551358, 0.08480701, 0.09070173, 0.09437338, 0.09264627, 0.08883232, 0.09287417, 0.09197289, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.0576596, 0.05600825, 0.04479006, 0.05288461, 0.05461192, 0.05137569, 0.04867965, 0.05301541, 0.04939842] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_image_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.06513795, 0.07009393, 0.07234055, 0.07426041, 0.07002589, 0.06415862, 0.07827643, 0.07962808, 0.07411247, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.07126552, 0.07025367, 0.07348302, 0.07580167, 0.07467338, 0.06918576, 0.07480252, 0.08279955, 0.08547315, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_inpainting_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) 
pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array( [0.14181179, 0.1493012, 0.14283323, 0.14602411, 0.14915377, 0.15015268, 0.14725655, 0.15009224, 0.15164584] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array([0.1398, 0.1476, 0.1407, 0.1442, 0.1470, 0.1480, 0.1449, 0.1481, 0.1494]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4)
diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py", "repo_id": "diffusers", "token_count": 9004 }
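# --- Illustrative sketch (not part of the dataset records above) ---
# Standalone IP-Adapter usage with StableDiffusionPipeline, reconstructed from
# IPAdapterSDIntegrationTests: the repo ids, subfolders, weight name, adapter scale and image
# URL are copied from the tests; the step count (50 instead of the tests' 5), device placement
# and PIL output handling are assumptions.
import torch
from transformers import CLIPVisionModelWithProjection
from diffusers import StableDiffusionPipeline
from diffusers.utils import load_image

image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16
)
pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=torch.float16
)
pipeline.to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.set_ip_adapter_scale(0.7)

# image prompt used by the tests' get_dummy_inputs()
ip_image = load_image(
    "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png"
)
image = pipeline(
    prompt="best quality, high quality",
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    ip_adapter_image=ip_image,
    num_inference_steps=50,
    generator=torch.Generator(device="cpu").manual_seed(33),
).images[0]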
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import traceback import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, numpy_cosine_similarity_distance, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_inpaint_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0689, 0.0699, 0.0790, 0.0536, 0.0470, 0.0488, 0.041, 0.0508, 0.04179]) assert np.abs(expected_slice - image_slice).max() < 3e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( 
block_out_channels=(32, 64), time_cond_proj_dim=time_cond_proj_dim, layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, img_res=64, output_pil=True): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched if output_pil: # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB").resize((img_res, img_res)) else: # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) # Convert image to [-1, 1] init_image = 2.0 * image - 1.0 mask_image = torch.ones((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4703, 0.5697, 0.3879, 0.5470, 0.6042, 0.4413, 0.5078, 0.4728, 0.4469]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, 
-3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_image_tensor(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) out_pil = output.images inputs = self.get_dummy_inputs(device) inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) output = sd_pipe(**inputs) out_tensor = output.images assert out_pil.shape == (1, 64, 64, 3) assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_stable_diffusion_inpaint_strength_zero_test(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) # check that the pipeline raises value error when num_inference_steps is < 1 inputs["strength"] = 0.01 with self.assertRaises(ValueError): sd_pipe(**inputs).images def test_stable_diffusion_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = ( sd_pipe.vae.encode(image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = ( sd_pipe.vae.encode(masked_image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] 
= mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipelineFastTests): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) 
image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6584, 0.5424, 0.5649, 0.5449, 0.5897, 0.6111, 0.5404, 0.5463, 0.5214]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] 
images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 @slow @require_torch_gpu class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0427, 0.0460, 0.0483, 0.0460, 0.0584, 0.0521, 0.1549, 0.1695, 0.1794]) assert np.abs(expected_slice - image_slice).max() < 6e-4 def test_stable_diffusion_inpaint_fp16(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1509, 0.1245, 0.1672, 0.1655, 0.1519, 0.1226, 0.1462, 0.1567, 0.2451]) assert np.abs(expected_slice - image_slice).max() < 1e-1 def test_stable_diffusion_inpaint_pndm(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) 
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9314, 0.7575, 0.9432, 0.8885, 0.9028, 0.7298, 0.9811, 0.9667, 0.7633]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_inpaint_compile, inputs=inputs) def test_stable_diffusion_inpaint_pil_input_resolution_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2728, 0.2803, 0.2665, 0.2511, 0.2774, 0.2586, 0.2391, 0.2392, 0.2582]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_simple_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = 
image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3757, 0.3875, 0.4445, 0.4353, 0.3780, 0.4513, 0.3965, 0.3984, 0.4362]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt" pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image_ckpt = pipe(**inputs).images[0] pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image = pipe(**inputs).images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 1e-4 @slow @require_torch_gpu class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0522, 0.0604, 0.0596, 0.0449, 0.0493, 0.0427, 0.1186, 0.1289, 0.1442]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_inpaint_fp16(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", 
torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1343, 0.1406, 0.1440, 0.1504, 0.1729, 0.0989, 0.1807, 0.2822, 0.1179]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_inpaint_pndm(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0966, 0.1083, 0.1148, 0.1422, 0.1318, 0.1197, 0.3702, 0.3537, 0.3288]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.8931, 0.8683, 0.8965, 0.8501, 0.8592, 0.9118, 0.8734, 0.7463, 0.8990]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe.vae = vae pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.45 GB is allocated assert mem_bytes < 2.45 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): pass def test_stable_diffusion_inpaint_pil_input_resolution_test(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input 
image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2458, 0.2576, 0.3124, 0.2679, 0.2669, 0.2796, 0.2872, 0.2975, 0.2661]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_simple_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3296, 0.4041, 0.4097, 0.4145, 0.4342, 0.4152, 0.4927, 0.4931, 0.4430]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.vae = vae pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): pass @nightly @require_torch_gpu class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, 
"num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_inpaint_ddim(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_pndm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_lms(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_dpm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase): def test_pil_inputs(self): height, width = 32, 32 im = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im = Image.fromarray(im) mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5 mask = Image.fromarray((mask * 255).astype(np.uint8)) t_mask, t_masked, t_image = prepare_mask_and_masked_image(im, mask, height, width, return_image=True) self.assertTrue(isinstance(t_mask, torch.Tensor)) self.assertTrue(isinstance(t_masked, torch.Tensor)) self.assertTrue(isinstance(t_image, torch.Tensor)) self.assertEqual(t_mask.ndim, 4) self.assertEqual(t_masked.ndim, 4) self.assertEqual(t_image.ndim, 4) self.assertEqual(t_mask.shape, (1, 1, height, width)) self.assertEqual(t_masked.shape, (1, 3, height, width)) self.assertEqual(t_image.shape, (1, 3, height, width)) self.assertTrue(t_mask.dtype == torch.float32) self.assertTrue(t_masked.dtype == torch.float32) self.assertTrue(t_image.dtype == torch.float32) self.assertTrue(t_mask.min() >= 0.0) self.assertTrue(t_mask.max() <= 1.0) self.assertTrue(t_masked.min() >= 
-1.0) self.assertTrue(t_masked.min() <= 1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_mask.sum() > 0.0) def test_np_inputs(self): height, width = 32, 32 im_np = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im_pil = Image.fromarray(im_np) mask_np = ( np.random.randint( 0, 255, ( height, width, ), dtype=np.uint8, ) > 127.5 ) mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8)) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) t_mask_pil, t_masked_pil, t_image_pil = prepare_mask_and_masked_image( im_pil, mask_pil, height, width, return_image=True ) self.assertTrue((t_mask_np == t_mask_pil).all()) self.assertTrue((t_masked_np == t_masked_pil).all()) self.assertTrue((t_image_np == t_image_pil).all()) def test_torch_3D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_3D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, 
t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_4D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0][0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_3D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy() for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_4D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy()[0] for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_shape_mismatch(self): height, width = 32, 32 # test height and width with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 3, height, width, ), torch.randn(64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 1, 64, 64), height, width, return_image=True, ) def test_type_mismatch(self): height, width = 32, 32 # test tensors-only with self.assertRaises(TypeError): 
prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.rand( 3, height, width, ).numpy(), height, width, return_image=True, ) # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ).numpy(), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_channels_first(self): height, width = 32, 32 # test channels first for 3D tensors with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.rand(height, width, 3), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_tensor_range(self): height, width = 32, 32 # test im <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * 2, torch.rand( height, width, ), height, width, return_image=True, ) # test im >= -1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * (-2), torch.rand( height, width, ), height, width, return_image=True, ) # test mask <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * 2, height, width, return_image=True, ) # test mask >= 0 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * -1, height, width, return_image=True, )
diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py", "repo_id": "diffusers", "token_count": 30633 }
134
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENPipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe = 
sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
diffusers/tests/pipelines/stable_diffusion_gligen/test_stable_diffusion_gligen.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_gligen/test_stable_diffusion_gligen.py", "repo_id": "diffusers", "token_count": 2746 }
135
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin enable_full_determinism() class StableDiffusionXLPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(2, 4), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, norm_num_groups=1, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") 
components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", } return inputs def test_stable_diffusion_xl_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5552, 0.5569, 0.4725, 0.4348, 0.4994, 0.4632, 0.5142, 0.5012, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, 
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_prompt_embeds_only(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, _, pooled_prompt_embeds, _, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def 
test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 700]: for scheduler_cls_timesteps in [ (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, 
expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ (DDIMScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (DPMSolverMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), (UniPCMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) steps = 25 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ ( DDIMScheduler, [ 961, 921, 881, 841, 801, 761, 721, 681, 641, 601, 561, 521, 481, 441, 401, 361, 321, 281, 241, 201, 161, 121, 81, 41, 1, ], ), ( EulerDiscreteScheduler, [ 961.0, 921.0, 881.0, 841.0, 801.0, 761.0, 721.0, 681.0, 641.0, 601.0, 561.0, 521.0, 481.0, 441.0, 401.0, 361.0, 321.0, 281.0, 241.0, 201.0, 161.0, 121.0, 81.0, 41.0, 1.0, ], ), ( DPMSolverMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( UniPCMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( HeunDiscreteScheduler, [ 961.0, 921.0, 921.0, 881.0, 881.0, 841.0, 841.0, 801.0, 801.0, 761.0, 761.0, 721.0, 721.0, 681.0, 681.0, 641.0, 641.0, 601.0, 601.0, 561.0, 561.0, 521.0, 521.0, 481.0, 481.0, 441.0, 441.0, 401.0, 401.0, 361.0, 361.0, 321.0, 321.0, 281.0, 281.0, 241.0, 241.0, 201.0, 201.0, 161.0, 161.0, 121.0, 121.0, 81.0, 81.0, 41.0, 41.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, 
num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert ( expected_steps_1 == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" with self.assertRaises(ValueError) as cm: inputs_2 = { **inputs, **{ "denoising_start": split_2, "denoising_end": split_1, "image": latents, "output_type": "latent", }, } pipe_2(**inputs_2).images[0] assert "cannot be larger than or equal to `denoising_end`" in str(cm.exception) inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert ( expected_steps == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results 
are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_cond = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_with_no_neg_cond - image_slice_with_neg_cond).max() > 1e-2) def test_stable_diffusion_xl_save_from_pretrained(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) with tempfile.TemporaryDirectory() as tmpdirname: sd_pipe.save_pretrained(tmpdirname) sd_pipe = StableDiffusionXLPipeline.from_pretrained(tmpdirname).to(torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 def test_stable_diffusion_xl_with_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] sd_pipe.fuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] sd_pipe.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose( original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 ), "Fusion of QKV projections 
shouldn't affect the outputs." assert np.allclose( image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." assert np.allclose( original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Original outputs should match when fused QKV projections are disabled." def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) @slow class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_lcm(self): torch.manual_seed(0) unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-ssd-1b", torch_dtype=torch.float16, variant="fp16" ) sd_pipe = StableDiffusionXLPipeline.from_pretrained( "segmind/SSD-1B", unet=unet, torch_dtype=torch.float16, variant="fp16" ).to(torch_device) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) prompt = "a red car standing on the side of the street" image = sd_pipe(prompt, num_inference_steps=4, guidance_scale=8.0).images[0] expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_ssd_1b_lcm.png" ) image = sd_pipe.image_processor.pil_to_numpy(image) expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-2 def test_download_ckpt_diff_format_is_same(self): ckpt_path = ( "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" ) pipe = StableDiffusionXLPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image_ckpt = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] pipe = StableDiffusionXLPipeline.from_pretrained( 
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 6e-3
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 22499 }
136
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class DownloadTests(unittest.TestCase): def test_download_only_pytorch(self): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _ = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin") for f in files) @slow @require_flax class FlaxPipelineTests(unittest.TestCase): def test_dummy_all_tpus(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 4 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3 assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1 images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) assert len(images_pil) == num_samples def test_stable_diffusion_v1_4(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = 
pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16_with_safety(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16 ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16_ddim(self): scheduler = FlaxDDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1, ) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, ) scheduler_state = scheduler.create_state() params["scheduler"] = scheduler_state prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 5e-2 assert 
np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1 def test_jax_memory_efficient_attention(self): prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) num_samples = jax.device_count() prompt = num_samples * [prompt] prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, ) params = replicate(params) prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) slice = images[2, 0, 256, 10:17, 1] # With memory efficient attention pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, ) params = replicate(params) prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) slice_eff = images_eff[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice).max() < 1e-2
diffusers/tests/pipelines/test_pipelines_flax.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipelines_flax.py", "repo_id": "diffusers", "token_count": 4560 }
137
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class IPNDMSchedulerTest(SchedulerCommonTest): scheduler_classes = (IPNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = {"num_train_timesteps": 1000} config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) 
num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample scheduler._step_index = None for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] scheduler.ets = dummy_past_residuals[:] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps, time_step=None) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 2540529) < 10
diffusers/tests/schedulers/test_scheduler_ipndm.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ipndm.py", "repo_id": "diffusers", "token_count": 3120 }
138
# JAX/Diffusers community sprint

Welcome to the JAX/Diffusers community sprint! The goal of this sprint is to work on fun and creative diffusion models using JAX and Diffusers. In this event, we will create various applications with diffusion models in JAX/Flax and Diffusers using free TPU hours generously provided by Google Cloud.

This document walks you through all the important information you need to make a submission to the JAX/Diffusers community sprint. Don't forget to fill out the [signup form]!

> 💡 Note: This document is still WIP and only contains initial details of the event. We will keep updating this document as we make other relevant information available throughout the community sprint.

## Organization

Participants can propose ideas for an interesting project involving diffusion models. Teams of 3 to 5 will then be formed around the most promising and interesting projects. Make sure to read through the [Communication](#communication) section on how to propose projects, comment on other participants' project ideas, and create a team.

To help each team successfully finish their project, we will organize talks by leading scientists and engineers from Google, Hugging Face, and the open-source diffusion community. The talks will take place on April 13th, 14th, and 17th. Make sure to attend the talks to get the most out of your participation! Check out the [Talks](#talks) section to get an overview of the talks, including the speaker and the time of each talk.

Each team is then given **free access to a TPU v4-8 VM** from April 14 to May 1st. In addition, we will provide a training example in JAX/Flax and Diffusers to train [ControlNets](https://huggingface.co/blog/controlnet) to kick-start your project. We will also provide examples of how to prepare datasets for ControlNet training. During the sprint, we'll make sure to answer any questions you might have about JAX/Flax and Diffusers and help each team as much as possible to complete their projects!

> 💡 Note: We will not be distributing TPUs for single-member teams, so you are encouraged to either join a team or find teammates for your idea.

At the end of the community sprint, each submission will be evaluated by a jury and the top-3 demos will be awarded a prize. Check out the [How to submit a demo] (TODO) section for more information and suggestions on how to submit your project.

> 💡 Note: Even though we provide an example for performing ControlNet training, participants can propose ideas that do not involve ControlNets at all, but the ideas need to be centered around diffusion models.

## Important dates

- **29.03.** Official announcement of the community week. Make sure to fill out the [signup form].
- **31.03.** Start forming groups in the #jax-diffusers-ideas channel on Discord.
- **10.04.** Data collection.
- **13.04. - 14.04. - [17.04.](https://www.youtube.com/watch?v=SOj2sxgvFe0)** Kick-off event with talks on YouTube.
- **14.04. - 17.04.** Start providing access to TPUs.
- **01.05.** Shutdown of access to TPUs.
- **08.05.** Announcement of the top 10 projects and prizes.

> 💡 Note: We will be accepting applications throughout the sprint.

## Communication

All important communication will take place on our Discord server. Join the server using [this link](https://hf.co/join/discord). After you join the server, take the Diffusers role in the `#role-assignment` channel and head to the `#jax-diffusers-ideas` channel to share your idea as a forum post.
To sign up for participation, fill out the [signup form] and we will give you access to two more Discord channels on discussions and technical support, and access to TPUs. Important announcements of the Hugging Face, Flax/JAX, and Google Cloud team will be posted in the server. The Discord server will be the central place for participants to post about their results, share their learning experiences, ask questions and get technical support in various obstacles they encounter. For issues with Flax/JAX, Diffusers, Datasets or for questions that are specific to your project we will be interacting through public repositories and forums: - Flax: [Issues](https://github.com/google/flax/issues), [Questions](https://github.com/google/flax/discussions) - JAX: [Issues](https://github.com/google/jax/issues), [Questions](https://github.com/google/jax/discussions) - 🤗 Diffusers: [Issues](https://github.com/huggingface/diffusers/issues), [Questions](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) - 🤗 Datasets: [Issues](https://github.com/huggingface/datasets/issues), [Questions](https://discuss.huggingface.co/c/datasets/10) - Project specific questions: Can be asked from each project's own post on #jax-diffusers-ideas channel on Discord. - TPU related questions: `#jax-diffusers-tpu-support` channel on Discord. - General discussion: `#jax-diffusers-sprint channel` on Discord. You will get access to `#jax-diffusers-tpu-support` and `#jax-diffusers-sprint` once you are accepted to attend the sprint. When asking for help, we encourage you to post the link to [forum](https://discuss.huggingface.co) post to the Discord server, instead of directly posting issues or questions. This way, we make sure that the everybody in the community can benefit from your questions, even after the community sprint. > 💡 Note: After 10th of April, if you have signed up on the google form, but you are not in the Discord channel, please leave a message on [the official forum announcement](https://discuss.huggingface.co/t/controlling-stable-diffusion-with-jax-and-diffusers-using-v4-tpus/35187/2) and ping `@mervenoyan`, `@sayakpaul`, and `@patrickvonplaten`. We might take a day to process these requests. ## Talks We have invited prominent researchers and engineers from Google, Hugging Face, and the open-source community who are working in the Generative AI space. We will update this section with links to the talks, so keep an eye here or on Discord in diffusion models core-announcements channel and set your reminders! 
### **April 13, 2023** | Speaker | Topic | Time | Video | |---|---|---|---| [Emiel Hoogeboom, Google Brain](https://twitter.com/emiel_hoogeboom?lang=en) | Pixel-Space Diffusion models for High Resolution Images | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | | [Apolinário Passos, Hugging Face](https://twitter.com/multimodalart?lang=en) | Introduction to Diffusers library | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | [Ting Chen, Google Brain](https://twitter.com/tingchenai?lang=en) | Diffusion++: discrete data and high-dimensional generation | 5.45pm-6.25pm CEST / 08.45am-09.25am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | ### **April 14, 2023** | Speaker | Topic | Time | Video | |---|---|---|---| | [Tim Salimans, Google Brain](https://twitter.com/timsalimans?lang=en) | Efficient image and video generation with distilled diffusion models | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | | [Suraj Patil, Hugging Face](https://twitter.com/psuraj28?lang=en) | Masked Generative Models: MaskGIT/Muse | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | | [Sabrina Mielke, John Hopkins University](https://twitter.com/sjmielke?lang=en) | From stateful code to purified JAX: how to build your neural net framework | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | ### **April 17, 2023** | Speaker | Topic | Time | Video | |---|---|---|---| | [Andreas Steiner, Google Brain](https://twitter.com/AndreasPSteiner) | JAX & ControlNet | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | | [Boris Dayma, craiyon](https://twitter.com/borisdayma?lang=en) | DALL-E Mini | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | | [Margaret Mitchell, Hugging Face](https://twitter.com/mmitchell_ai?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor) | Ethics of Text-to-Image | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | [signup form]: https://forms.gle/t3M7aNPuLL9V1sfa9 ## Data and Pre-Processing In this section, we will cover how to build your own dataset for ControlNet training. ### Prepare a large local dataset #### Mount a disk If you need extra space, you can follow [this guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#prerequisites) to create a persistent disk, attach it to your TPU VM, and create a directory to mount the disk. You can then use this directory to store your dataset. As a side note, the TPU VM allocated to your team has a 3 TB persistent storage drive attached to it. 
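As a rough illustration, attaching and mounting a persistent disk usually boils down to the commands below. This is a minimal sketch only — the device name `/dev/sdb` and the mount point are assumptions and will differ on your VM, and formatting a disk erases it, so only format a brand-new disk:

```bash
# List block devices to find the attached disk (the exact device name is an assumption)
lsblk

# Format the new disk once (WARNING: this erases the disk)
sudo mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb

# Create a mount point and mount the disk with write access for all users
sudo mkdir -p /mnt/disks/persist
sudo mount -o discard,defaults /dev/sdb /mnt/disks/persist
sudo chmod a+w /mnt/disks/persist
```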
To learn how to use this drive, check out [this guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#mount-pd).

#### Data preprocessing

Here we demonstrate how to prepare a large dataset to train a ControlNet model with canny edge detection. More specifically, we provide an [example script](./dataset_tools/coyo_1m_dataset_preprocess.py) that:

* Selects 1 million image-text pairs from an existing dataset [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m).
* Downloads each image and uses a Canny edge detector to generate the conditioning image.
* Creates a metafile that links all the images and processed images to their text captions.

Use the following command to run the example data preprocessing script. If you've mounted a disk to your TPU, you should place your `train_data_dir` and `cache_dir` on the mounted disk:

```bash
python3 coyo_1m_dataset_preprocess.py \
 --train_data_dir="/mnt/disks/persist/data" \
 --cache_dir="/mnt/disks/persist" \
 --max_train_samples=1000000 \
 --num_proc=16
```

Once the script finishes running, you can find a data folder at the specified `train_data_dir` with the following folder structure:

```
data
├── images
│   ├── image_1.png
│   ├── .......
│   └── image_1000000.jpeg
├── processed_images
│   ├── image_1.png
│   ├── .......
│   └── image_1000000.jpeg
└── meta.jsonl
```

#### Load dataset

To load a dataset from the data folder you just created, you should add a dataset loading script to your data folder. The dataset loading script should have the same name as the folder. For example, if your data folder is `data`, you should add a data loading script named `data.py`. We provide an [example data loading script](./dataset_tools/data.py) for you to use. All you need to do is update `DATA_DIR` with the correct path to your data folder. For more details about how to write a dataset loading script, refer to the [documentation](https://huggingface.co/docs/datasets/dataset_script).

Once the dataset loading script is added to your data folder, you can load it with:

```python
from datasets import load_dataset

dataset = load_dataset("/mnt/disks/persist/data", cache_dir="/mnt/disks/persist")
```

Note that you can use the `--train_data_dir` flag to pass your data folder directory to the training script and generate your dataset automatically during the training. For large datasets, we recommend generating the dataset once and saving it on the disk with:

```python
dataset.save_to_disk("/mnt/disks/persist/dataset")
```

You can then reuse the saved dataset for your training by passing the `--load_from_disk` flag.
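For instance, a later run (or a quick sanity check) can reload the saved dataset directly from the mounted disk — a minimal sketch, assuming the dataset was saved to `/mnt/disks/persist/dataset` as above:

```python
from datasets import load_from_disk

# Reload the dataset that was previously written with `save_to_disk`
dataset = load_from_disk("/mnt/disks/persist/dataset")
print(dataset)
```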
Here is an example of running the training script so that it loads the dataset from the disk:

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="/mnt/disks/persist/canny_model"
export DATASET_DIR="/mnt/disks/persist/dataset"
export DISK_DIR="/mnt/disks/persist"

python3 train_controlnet_flax.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --train_data_dir=$DATASET_DIR \
 --load_from_disk \
 --cache_dir=$DISK_DIR \
 --resolution=512 \
 --learning_rate=1e-5 \
 --train_batch_size=2 \
 --revision="non-ema" \
 --from_pt \
 --max_train_steps=500000 \
 --checkpointing_steps=10000 \
 --dataloader_num_workers=16
```

### Prepare a dataset with MediaPipe and Hugging Face

We provide a notebook ([![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/community-events/blob/main/jax-controlnet-sprint/dataset_tools/create_pose_dataset.ipynb)) that shows you how to prepare a dataset for ControlNet training using [MediaPipe](https://developers.google.com/mediapipe) and Hugging Face. Specifically, in the notebook, we show:

* How to leverage MediaPipe solutions to extract pose body joints from the input images.
* How to predict captions for the input images with BLIP-2 using 🤗 Transformers.
* How to build and push the final dataset to the Hugging Face Hub using 🤗 Datasets.

You can refer to the notebook to create your own datasets using other MediaPipe solutions as well. Below, we list all the relevant ones:

* [Pose Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker)
* [Face Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/face_landmarker)
* [Selfie Segmentation](https://developers.google.com/mediapipe/solutions/vision/image_segmenter)

## Training ControlNet

This is perhaps the most fun and interesting part of this document, as here we show you how to train a custom ControlNet model.

> 💡 Note: For this sprint, you are NOT restricted to just training ControlNets. We provide this training script as a reference for you to get started.

For faster training on TPUs and GPUs you can leverage the Flax training example. Follow the instructions above to get the model and dataset before running the script.

### Setting up your TPU VM

_Before proceeding with the rest of this section, you must ensure that the email address you're using has been added to the `hf-flax` project on Google Cloud Platform. If it's not the case, please let us know in the Discord server (you can tag `@sayakpaul`, `@merve`, and `@patrickvonplaten`)._

In the following, we will describe how to do so using a standard console, but you should also be able to connect to the TPU VM via IDEs, like Visual Studio Code, etc.

1. You need to install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/install). Please follow the instructions on https://cloud.google.com/sdk.

2. Once you've installed the Google Cloud SDK, you should set your account by running the following command. Make sure that `<your-email-address>` corresponds to the Gmail address you used to sign up for this event.

```bash
gcloud config set account <your-email-address>
```

3. Let's also make sure the correct project is set in case your email is used for multiple gcloud projects:

```bash
gcloud config set project hf-flax
```

4. Next, you will need to authenticate yourself. You can do so by running:

```bash
gcloud auth login
```

This should give you a link to a website, where you can authenticate your Gmail account.
5. Finally, you can establish an SSH tunnel into the TPU VM! Please run the following command, setting `--zone` to `us-central2-b` and `<tpu-name>` to the TPU name that was sent to you via email by the Hugging Face team.

```bash
gcloud alpha compute tpus tpu-vm ssh <tpu-name> --zone <zone> --project hf-flax
```

This should establish an SSH tunnel into the TPU VM!

> 💡 Note: You are NOT supposed to have access to the Google Cloud console. Also, you might not get an invitation link to join the `hf-flax` project. But you should still be able to access the TPU VM following the above steps.

> 💡 Note: The TPU VMs are already attached to persistent storage drives (of 3 TB). This will be helpful in case your team wants to perform training on a large dataset locally. The disk name of the storage drive should also be present in the email you received. Follow [this section](https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint#mount-a-disk) for more details.

### Installing JAX

Let's first create a Python virtual environment:

```bash
python3 -m venv <your-venv-name>
```

We can activate the environment by running:

```bash
source ~/<your-venv-name>/bin/activate
```

Then install Diffusers and the library's training dependencies:

```bash
pip install git+https://github.com/huggingface/diffusers.git
```

Then clone this repository and install JAX, Flax and the other dependencies:

```bash
git clone https://github.com/huggingface/community-events
cd community-events/jax-controlnet-sprint/training_scripts
pip install -U -r requirements_flax.txt
```

To verify that JAX was correctly installed, you can run the following command:

```python
import jax
jax.device_count()
```

This should display the number of TPU devices, which should be 4 on a TPU v4-8 VM. If Python is not able to detect the TPU device, please take a look at [this section](#troubleshoot-your-tpu-vm) for solutions.

If you want to use Weights & Biases logging, you should also install `wandb` now:

```bash
pip install wandb
```

> 💡 Note: Weights & Biases is free for students, educators, and academic researchers. All participants of our event are qualified to get an academic Weights & Biases team account. To create your team, you can visit https://wandb.ai/create-team and choose the team type to be "Academic". For more information regarding the creation and management of Weights & Biases teams, you can check out https://docs.wandb.ai/guides/app/features/teams.

### Running the training script

Now let's download two conditioning images that we will use to run validation during the training in order to track our progress:

```bash
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

We encourage you to store or share your model with the community. To use the Hugging Face Hub, please log in to your Hugging Face account, or ([create one](https://hf.co/join) if you don't have one already):

```bash
huggingface-cli login
```

Make sure you have the `MODEL_DIR`, `OUTPUT_DIR` and `HUB_MODEL_ID` environment variables set.
The `OUTPUT_DIR` and `HUB_MODEL_ID` variables specify where to save the model to on the Hub: ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="runs/fill-circle-{timestamp}" export HUB_MODEL_ID="controlnet-fill-circle" ``` And finally start the training (make sure you're in the `jax-controlnet-sprint/training_scripts` directory)! ```bash python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --validation_steps=1000 \ --train_batch_size=2 \ --revision="non-ema" \ --from_pt \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID \ --num_train_epochs=11 \ --push_to_hub \ --hub_model_id=$HUB_MODEL_ID ``` Note that `--from_pt` argument will convert your pytorch checkpoint into flax. However, it will only work with checkpoints in diffusers format. If your `MODEL_DIR` does not contain checkpoints in diffusers format, you cannot use the `--from_pt` argument. You can convert your `ckpt` or `safetensors` checkpoints into diffusers format using [this script](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py). Since we passed the `--push_to_hub` flag, it will automatically create a model repo under your Hugging Face account based on `$HUB_MODEL_ID`. By the end of training, the final checkpoint will be automatically stored on the hub. You can find an example model repo [here](https://huggingface.co/YiYiXu/fill-circle-controlnet). Our training script also provides limited support for streaming large datasets from the Hugging Face Hub. In order to enable streaming, one must also set `--max_train_samples`. Here is an example command (from [this blog article](https://huggingface.co/blog/train-your-controlnet)): ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="runs/uncanny-faces-{timestamp}" export HUB_MODEL_ID="controlnet-uncanny-faces" python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=multimodalart/facesyntheticsspigacaptioned \ --streaming \ --conditioning_image_column=spiga_seg \ --image_column=image \ --caption_column=image_caption \ --resolution=512 \ --max_train_samples 100000 \ --learning_rate=1e-5 \ --train_batch_size=1 \ --revision="flax" \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID ``` Note, however, that the performance of the TPUs might get bottlenecked as streaming with `datasets` is not optimized for images. For ensuring maximum throughput, we encourage you to explore the following options: * [Webdataset](https://webdataset.github.io/webdataset/) * [TorchData](https://github.com/pytorch/data) * [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds) When work with a larger dataset, you may need to run training process for a long time and it’s useful to save regular checkpoints during the process. You can use the following argument to enable intermediate checkpointing: ```bash --checkpointing_steps=500 ``` This will save the trained model in subfolders of your output_dir. 
Each subfolder is named after the number of steps performed so far; for example, a checkpoint saved after 500 training steps would be saved in a subfolder named `500`. You can then start your training from this saved checkpoint with:

```bash
 --controlnet_model_name_or_path="./control_out/500"
```

We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556), which helps to achieve faster convergence by rebalancing the loss. To use it, one needs to set the `--snr_gamma` argument. The recommended value when using it is `5.0`.

We also support gradient accumulation, a technique that lets you use a bigger batch size than your machine would normally be able to fit into memory. You can use the `gradient_accumulation_steps` argument to set the number of gradient accumulation steps. The ControlNet author recommends using gradient accumulation to achieve better convergence. Read more [here](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation).

You can **profile your code** with:

```bash
 --profile_steps=5
```

Refer to the [JAX documentation on profiling](https://jax.readthedocs.io/en/latest/profiling.html). To inspect the profile trace, you'll have to install and start Tensorboard with the profile plugin:

```bash
pip install tensorflow tensorboard-plugin-profile
tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
```

The profile can then be inspected at http://localhost:6006/#profile.

Sometimes you'll get version conflicts (error messages like `Duplicate plugins for name projector`), which means that you have to uninstall and reinstall all versions of Tensorflow/Tensorboard (e.g. with `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`).

Note that the debugging functionality of the Tensorboard `profile` plugin is still under active development. Not all views are fully functional, and for example the `trace_viewer` cuts off events after 1M (which can result in all your device traces getting lost if you for example profile the compilation step by accident).

### Troubleshoot your TPU VM

**VERY IMPORTANT** - Only one process can access the TPU cores at a time. This means that if multiple team members are trying to connect to the TPU cores, you will get errors such as:

```
libtpu.so already in used by another process. Not attempting to load libtpu.so in this process.
```

We recommend that every team member create their own virtual environment, but only one person should run the heavy training processes. Also, please take turns when setting up the TPU v4-8 so that everybody can verify that JAX is correctly installed.

If your team members are not currently using the TPU but you still get this error message, you should kill the process that is using the TPU with:

```
kill -9 PID
```

You will need to replace "PID" with the PID of the process that is using the TPU. In most cases, this information is included in the error message. For example, if you get:

```
The TPU is already in use by a process with pid 1378725. Not attempting to load libtpu.so in this process.
```

you can do:

```
kill -9 1378725
```

You can also use the command below to find the processes using each of the TPU chips (e.g. `/dev/accel0` is one of the TPU chips):
```
sudo lsof -w /dev/accel0
```

To kill all the processes using `/dev/accel0`:

```
sudo lsof -t /dev/accel0 | xargs kill -9
```

If Python is not able to detect your TPU device (i.e. when you do `jax.device_count()` and it outputs `0`), it might be because you have no rights to access the TPU logs, or because you have a dangling TPU lock file. Running these commands usually fixes the issue:

```
sudo rm -f /tmp/libtpu_lockfile
```

```
sudo chmod o+w /tmp/tpu_logs/
```

<div id="how-to-make-a-submission">
<h2> How to Make a Submission </h2>
</div>

To make a full submission, you need to have the following on the Hugging Face Hub:

- Model repository with model weights and model card,
- (Optional) Dataset repository with dataset card,
- A Hugging Face Space that lets others interact with your model.

### Pushing model weights and the model card to the Hub

**If you are using the training script (`train_controlnet_flax.py`) provided in this directory**

Enabling the `push_to_hub` argument in the training arguments will:

- Create a model repository locally, and remotely on the Hugging Face Hub,
- Create a model card and write it to the local model repository,
- Save your model to the local model repository,
- Push the local repository to the Hugging Face Hub.

Your automatically generated model card will look like below 👇

![Model Card](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/jax_model_card.png)

You can edit the model card to be more informative. Model cards that are more informative than the others will carry more weight during evaluation.

**If you have trained a custom model and not used the script**

You need to authenticate yourself with `huggingface-cli login` as instructed above. If you are using one of the available model classes from `diffusers`, save your model with its `save_pretrained` method.

```python
model.save_pretrained("path_to_your_model_repository")
```

After saving your model to a folder, you can simply use the script below to push your model to the Hub 👇

```python
from huggingface_hub import create_repo, upload_folder

create_repo("username/my-awesome-model")
upload_folder(
    folder_path="path_to_your_model_repository",
    repo_id="username/my-awesome-model"
)
```

This will push your model to the Hub. After pushing your model to the Hub, you need to create the model card yourself.

You can use the graphical interface to edit the model card.

![Edit Model Card](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/edit_model_card.png)

Every model card consists of two sections, metadata and free text. You can edit the metadata from the sections in the graphical UI. If you have saved your model using `save_pretrained`, you do not need to provide `pipeline_tag` and `library_name`. If not, provide `pipeline_tag`, `library_name` and the dataset if it exists on the Hugging Face Hub. Aside from these, you need to add `jax-diffusers-event` to the `tags` section.

```
---
license: apache-2.0
library_name: diffusers
tags:
- jax-diffusers-event
datasets:
- red_caps
pipeline_tag: text-to-image
---
```

![Edit Metadata](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/edit_metadata.png)

### Creating our Space

<h4> Writing our Application </h4>

We will use [Gradio](https://gradio.app/) to build our applications. Gradio has two main APIs: `Interface` and `Blocks`.
`Interface` is a high-level API that lets you create an interface with few lines of code, and `Blocks` is a lower-level API that gives you more flexibility over interfaces you can build. The code should be included in a file called `app.py`. Let's try to create a ControlNet app as an example. The `Interface` API simply works like below 👇 ```python import gradio as gr # inference function takes prompt, negative prompt and image def infer(prompt, negative_prompt, image): # implement your inference function here return output_image # you need to pass inputs and outputs according to inference function gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image").launch() ``` You can customize your interface by passing `title`, `description` and `examples` to the `Interface` function. ```python title = "ControlNet on Canny Filter" description = "This is a demo on ControlNet based on canny filter." # you need to pass your examples according to your inputs # each inner list is one example, each element in the list corresponding to a component in the `inputs`. examples = [["a cat with cake texture", "low quality", "cat_image.png"]] gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image", title = title, description = description, examples = examples, theme='gradio/soft').launch() ``` Your interface will look like below 👇 ![ControlNet](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio_controlnet.png) With Blocks, you can add markdown, tabs, components under columns and rows and more. Assume we have two ControlNets and we want to include them in one Space. We will have them under different tabs under one demo like below 👇 ```python import gradio as gr def infer_segmentation(prompt, negative_prompt, image): # your inference function for segmentation control return im def infer_canny(prompt, negative_prompt, image): # your inference function for canny control return im with gr.Blocks(theme='gradio/soft') as demo: gr.Markdown("## Stable Diffusion with Different Controls") gr.Markdown("In this app, you can find different ControlNets with different filters. ") with gr.Tab("ControlNet on Canny Filter "): prompt_input_canny = gr.Textbox(label="Prompt") negative_prompt_canny = gr.Textbox(label="Negative Prompt") canny_input = gr.Image(label="Input Image") canny_output = gr.Image(label="Output Image") submit_btn = gr.Button(value = "Submit") canny_inputs = [prompt_input_canny, negative_prompt_canny, canny_input] submit_btn.click(fn=infer_canny, inputs=canny_inputs, outputs=[canny_output]) with gr.Tab("ControlNet with Semantic Segmentation"): prompt_input_seg = gr.Textbox(label="Prompt") negative_prompt_seg = gr.Textbox(label="Negative Prompt") seg_input = gr.Image(label="Image") seg_output = gr.Image(label="Output Image") submit_btn = gr.Button(value = "Submit") seg_inputs = [prompt_input_seg, negative_prompt_seg, seg_input] submit_btn.click(fn=infer_segmentation, inputs=seg_inputs, outputs=[seg_output]) demo.launch() ``` Above demo will look like below 👇 ![Gradio Blocks](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio_controlnet_blocks.png) #### Creating our Space After our application is written, we can create a Hugging Face Space to host our app. You can go to [huggingface.co](http://huggingface.co), click on your profile on top right and select “New Space”. 
![New Space](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_space.png)

We can name our Space, pick a license, and select "Gradio" as the Space SDK.

![Space Configuration](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/space_config.png)

After creating the Space, you can either use the instructions below to clone the repository locally, add your files and push, or use the graphical interface to create the files and write the code in the browser.

![Spaces Landing](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/repository_landing.png)

To upload your application file, pick "Add File" and drag and drop your file.

![New Space Landing](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/add_file.png)

Lastly, we have to create a file called `requirements.txt` and add the requirements of our project. Make sure to install the versions of JAX, Diffusers, and the other dependencies shown below.

```
-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
jax[cuda11_cudnn805]
jaxlib
git+https://github.com/huggingface/diffusers@main
opencv-python
transformers
flax
```

We will give you a GPU grant so your application can run on a GPU.

We have a leaderboard hosted [here](https://huggingface.co/spaces/jax-diffusers-event/leaderboard) and we will be distributing prizes from this leaderboard. To make your Space show up on the leaderboard, simply edit the `README.md` of your Space so that it has the tag `jax-diffusers-event` under `tags`, like below 👇

```
---
title: Canny Coyo1m
emoji: 💜
...
tags:
- jax-diffusers-event
---
```

## Prizes

For this sprint we will have many prizes. We will pick the first ten projects from [this leaderboard](https://huggingface.co/spaces/jax-diffusers-event/leaderboard), so you should tag your Space for the leaderboard to make your submission complete, as instructed in the section above. The projects are ranked by likes, so we will amplify the visibility of all projects for people to cast their votes by leaving a like on the Space. We will pick the first ten projects from the ranking, and the jury will cast their votes to determine the first three places. These projects will be highlighted by both Google and Hugging Face. Elaborately made interfaces as well as projects with open-sourced codebases and models will likely increase the chance of winning prizes.

Prizes are as follows and are given to each team member 👇

**First Place**: A voucher of $150 that you can spend at the [Hugging Face Store](https://store.huggingface.co/), a Hugging Face Hub PRO subscription for one year, and the Natural Language Processing with Transformers book

**Second Place**: A voucher of $125 that you can spend at the [Hugging Face Store](https://store.huggingface.co/), and a Hugging Face Hub PRO subscription for one year

**Third Place**: A voucher of $100 that you can spend at the [Hugging Face Store](https://store.huggingface.co/), and a Hugging Face Hub PRO subscription for one year

The first ten projects on the leaderboard (regardless of jury decision) will win a merch set exclusively made for this sprint by Hugging Face, and a separate JAX merch set from Google.

## Jury

Our jury panel for this sprint included:

1. Robin Rombach, Stability AI
2. Huiwen Chang, Google Research
3. Jun-Yan Zhu, Carnegie Mellon University
4. Merve Noyan, Hugging Face

## FAQ

In this section, we are collecting answers to frequently asked questions from our Discord channel. Contributions welcome!
### How to Use VSCode with TPU VM?

You can follow this [general guide](https://medium.com/@ivanzhd/vscode-sftp-connection-to-compute-engine-on-google-cloud-platform-gcloud-9312797d56eb) on how to use VSCode remote to connect to Google Cloud VMs. Once it's set up, you can develop on the TPU VM using VSCode.

To get your external IP, use this command:

```
gcloud compute tpus tpu-vm describe <node_name> --zone=<zone>
```

It should be listed under 'accessConfig' -> 'externalIp'.

### How to Test Your Code Locally?

Since team members are sharing the TPU VM, it might be practical to write and test your code locally on a CPU while your teammates are running the training process on the VM. To run local testing, it is important to set the `xla_force_host_platform_device_count` flag to `4`. Read more in the [documentation](https://jax.readthedocs.io/en/latest/jax-101/06-parallelism.html#aside-hosts-and-devices-in-jax).

## Sprint winners

The top 10 projects (based on the likes on their demo applications) are available on this [leaderboard](https://huggingface.co/spaces/jax-diffusers-event/leaderboard). We took this leaderboard to our [jury](#jury), who judged the top 10 projects based on several factors such as open-source model checkpoints, datasets, and codebases, the completeness of the model and dataset cards, etc. As a result, the following three projects emerged as the winners:

1. [ControlNet for Interior Design](https://huggingface.co/spaces/controlnet-interior-design/controlnet-seg)
2. [ControlNet for Adjusting Brightness](https://huggingface.co/spaces/ioclab/brightness-controlnet)
3. [Stable Diffusion with Hand Control](https://huggingface.co/spaces/vllab/controlnet-hands)
import wandb
import numpy as np
import torch, torchvision
import torch.nn.functional as F
from PIL import Image
from tqdm.auto import tqdm
from fastcore.script import call_parse
from torchvision import transforms
from diffusers import DDPMPipeline
from diffusers import DDIMScheduler
from datasets import load_dataset
from matplotlib import pyplot as plt


@call_parse
def train(
    image_size = 256,
    batch_size = 16,
    grad_accumulation_steps = 2,
    num_epochs = 1,
    start_model = "google/ddpm-bedroom-256",
    dataset_name = "huggan/wikiart",
    device='cuda',
    model_save_name='wikiart_1e',
    wandb_project='dm_finetune',
    log_samples_every = 250,
    save_model_every = 2500,
    ):
    # Wandb for logging
    wandb.init(project=wandb_project, config=locals())

    # Prepare the pretrained model
    image_pipe = DDPMPipeline.from_pretrained(start_model); image_pipe.to(device)

    # Get a scheduler for sampling
    sampling_scheduler = DDIMScheduler.from_config(start_model)
    sampling_scheduler.set_timesteps(num_inference_steps=50)

    # Prepare the dataset
    dataset = load_dataset(dataset_name, split="train")
    preprocess = transforms.Compose(
        [
            transforms.Resize((image_size, image_size)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )

    def transform(examples):
        images = [preprocess(image.convert("RGB")) for image in examples["image"]]
        return {"images": images}

    dataset.set_transform(transform)
    train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # Optimizer and learning rate scheduler
    optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=1e-5)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    for epoch in range(num_epochs):
        for step, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
            # Get the clean images
            clean_images = batch['images'].to(device)

            # Sample noise to add to the images
            noise = torch.randn(clean_images.shape).to(clean_images.device)
            bs = clean_images.shape[0]

            # Sample a random timestep for each image
            timesteps = torch.randint(0, image_pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device).long()

            # Add noise to the clean images according to the noise magnitude at each timestep
            # (this is the forward diffusion process).
            noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps)

            # Get the model's noise prediction
            noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0]

            # Compare the prediction with the actual noise
            loss = F.mse_loss(noise_pred, noise)

            # Log the loss
            wandb.log({'loss':loss.item()})

            # Compute the gradients
            loss.backward()

            # Gradient accumulation: only update every grad_accumulation_steps steps
            if (step+1)%grad_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()

            # Occasionally log some samples
            if (step+1)%log_samples_every == 0:
                x = torch.randn(8, 3, 256, 256).to(device)  # Batch of 8
                for i, t in tqdm(enumerate(sampling_scheduler.timesteps)):
                    model_input = sampling_scheduler.scale_model_input(x, t)
                    with torch.no_grad():
                        noise_pred = image_pipe.unet(model_input, t)["sample"]
                    x = sampling_scheduler.step(noise_pred, t, x).prev_sample
                grid = torchvision.utils.make_grid(x, nrow=4)
                im = grid.permute(1, 2, 0).cpu().clip(-1, 1)*0.5 + 0.5
                im = Image.fromarray(np.array(im*255).astype(np.uint8))
                wandb.log({'Sample generations': wandb.Image(im)})

            # Occasionally save the model
            if (step+1)%save_model_every == 0:
                image_pipe.save_pretrained(model_save_name+f'step_{step+1}')

        # Update the learning rate for the next epoch
        scheduler.step()

    # Save the pipeline one last time
    image_pipe.save_pretrained(model_save_name)

    # End the run
    wandb.finish()
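Since `@call_parse` from `fastcore` turns the keyword arguments of `train()` into command-line flags, the script can be launched directly from a shell. A hypothetical invocation (the file name `finetune_model.py` and the argument values shown are assumptions):

```bash
python finetune_model.py \
    --image_size 256 \
    --batch_size 16 \
    --num_epochs 1 \
    --dataset_name "huggan/wikiart" \
    --model_save_name "wikiart_1e"
```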
<jupyter_start><jupyter_text>Diffusion pour l'audio Dans ce *notebook*, nous allons jeter un bref coup d'œil à la génération d'audio avec des modèles de diffusion.Ce que vous allez apprendre :- Comment l'audio est représenté dans un ordinateur- Les méthodes de conversion entre les données audio brutes et les spectrogrammes- Comment préparer un chargeur de données avec une fonction personnalisée pour convertir des tranches d'audio en spectrogrammes- *Finetuner* un modèle de diffusion audio existant sur un genre de musique spécifique- Télécharger votre pipeline personnalisé sur le Hub d'Hugging FaceMise en garde : il s'agit principalement d'un objectif pédagogique - rien ne garantit que notre modèle sonnera bien 😉Commençons ! Configuration et importations<jupyter_code># !pip install -q datasets diffusers torchaudio accelerate import torch, random import numpy as np import torch.nn.functional as F from tqdm.auto import tqdm from IPython.display import Audio from matplotlib import pyplot as plt from diffusers import DiffusionPipeline from torchaudio import transforms as AT from torchvision import transforms as IT<jupyter_output><empty_output><jupyter_text>Echantillonnage à partir d'un pipeline audio pré-entraînéCommençons par suivre la [documentation](https://huggingface.co/docs/diffusers/api/pipelines/audio_diffusion) pour charger un modèle de diffusion audio préexistant :<jupyter_code># Chargement d'un pipeline de diffusion audio pré-entraîné device = "cuda" if torch.cuda.is_available() else "cpu" pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-instrumental-hiphop-256").to(device)<jupyter_output><empty_output><jupyter_text>Comme pour les pipelines que nous avons utilisés dans les unités précédentes, nous pouvons créer des échantillons en appelant le pipeline comme suit :<jupyter_code># Échantillonner à partir du pipeline et afficher les résultats output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate()))<jupyter_output><empty_output><jupyter_text>Ici, l'argument `rate` spécifie la fréquence d'échantillonnage de l'audio ; nous y reviendrons plus tard. Vous remarquerez également que le pipeline renvoie plusieurs choses. Que se passe-t-il ici ? Examinons de plus près les deux sorties.La première est un tableau de données, représentant l'audio généré :<jupyter_code># Le tableau audio output.audios[0].shape<jupyter_output><empty_output><jupyter_text>La seconde ressemble à une image en niveaux de gris :<jupyter_code># L'image de sortie (spectrogramme) output.images[0].size<jupyter_output><empty_output><jupyter_text>Cela nous donne un aperçu du fonctionnement de ce pipeline. L'audio n'est pas directement généré par diffusion. Au lieu de cela, le pipeline a le même type d'UNet 2D que les pipelines de génération d'images inconditionnelles que nous avons vus dans l'unité 1, qui est utilisé pour générer le spectrogramme, qui est ensuite post-traité dans l'audio final.Le pipeline possède un composant supplémentaire qui gère ces conversions, auquel nous pouvons accéder via `pipe.mel` :<jupyter_code>pipe.mel<jupyter_output><empty_output><jupyter_text>De l'audio à l'image et inversementUne "forme d'onde" encode les échantillons audio bruts dans le temps. Il peut s'agir du signal électrique reçu d'un microphone, par exemple. Travailler avec cette représentation du "domaine temporel" peut s'avérer délicat, c'est pourquoi il est courant de la convertir sous une autre forme, communément appelée spectrogramme. 
Un spectrogramme montre l'intensité de différentes fréquences (axe y) en fonction du temps (axe x) :<jupyter_code># Calculer et afficher un spectrogramme pour notre échantillon audio généré en utilisant torchaudio spec_transform = AT.Spectrogram(power=2) spectrogram = spec_transform(torch.tensor(output.audios[0])) print(spectrogram.min(), spectrogram.max()) log_spectrogram = spectrogram.log() plt.imshow(log_spectrogram[0], cmap='gray');<jupyter_output>tensor(0.) tensor(6.0842)<jupyter_text>Le spectrogramme que nous venons de créer contient des valeurs comprises entre 0,0000000000001 et 1, la plupart d'entre elles étant proches de la limite inférieure de cette plage. Ce n'est pas l'idéal pour la visualisation ou la modélisation. En fait, nous avons dû prendre le logarithme de ces valeurs pour obtenir un tracé en niveaux de gris qui montre des détails. Pour cette raison, nous utilisons généralement un type spécial de spectrogramme appelé Mel spectrogramme, qui est conçu pour capturer les types d'informations qui sont importantes pour l'audition humaine en appliquant certaines transformations aux différentes composantes de fréquence du signal. *Quelques transformations audio de la documentation [torchaudio](https://pytorch.org/audio/stable/transforms.html)* Heureusement pour nous, nous n'avons pas besoin de nous préoccuper de ces transformations, la fonctionnalité `mel` du pipeline s'occupe de ces détails pour nous. En l'utilisant, nous pouvons convertir une image de spectrogramme en audio comme suit :<jupyter_code>a = pipe.mel.image_to_audio(output.images[0]) a.shape<jupyter_output><empty_output><jupyter_text>Nous pouvons également convertir un tableau de données audio en images de spectrogramme en chargeant d'abord les données audio brutes, puis en appelant la fonction `audio_slice_to_image()`. Les clips plus longs sont automatiquement découpés en morceaux de la bonne longueur pour produire une image de spectrogramme de 256x256 :<jupyter_code>pipe.mel.load_audio(raw_audio=a) im = pipe.mel.audio_slice_to_image(0) im<jupyter_output><empty_output><jupyter_text>L'audio est représenté sous la forme d'un long tableau de nombres. Pour l'écouter nous avons besoin d'une autre information clé : la fréquence d'échantillonnage. 
Combien d'échantillons (valeurs individuelles) utilisons-nous pour représenter une seconde d'audio ?Nous pouvons voir la fréquence d'échantillonnage utilisée lors de l'entraînement de ce pipeline avec :<jupyter_code>sample_rate_pipeline = pipe.mel.get_sample_rate() sample_rate_pipeline<jupyter_output><empty_output><jupyter_text>Si nous spécifions mal la fréquence d'échantillonnage, nous obtenons un son accéléré ou ralenti :<jupyter_code>display(Audio(output.audios[0], rate=44100)) # Vitesse x2<jupyter_output><empty_output><jupyter_text>*Finetuning* du pipelineMaintenant que nous avons une compréhension approximative du fonctionnement du pipeline, nous allons le *finetuner* sur de nouvelles données audio !Le jeu de données est une collection de clips audio de différents genres, que nous pouvons charger depuis le Hub de la manière suivante :<jupyter_code>from datasets import load_dataset dataset = load_dataset('lewtun/music_genres', split='train') dataset<jupyter_output>Using custom data configuration lewtun--music_genres-2cfa9201f94788d8 Found cached dataset parquet (/home/ubuntu/.cache/huggingface/datasets/lewtun___parquet/lewtun--music_genres-2cfa9201f94788d8/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>Vous pouvez utiliser le code ci-dessous pour voir les différents genres dans le jeu de données et combien d'échantillons sont contenus dans chacun d'eux :<jupyter_code>for g in list(set(dataset['genre'])): print(g, sum(x==g for x in dataset['genre']))<jupyter_output>Pop 945 Blues 58 Punk 2582 Old-Time / Historic 408 Experimental 1800 Folk 1214 Electronic 3071 Spoken 94 Classical 495 Country 142 Instrumental 1044 Chiptune / Glitch 1181 International 814 Ambient Electronic 796 Jazz 306 Soul-RnB 94 Hip-Hop 1757 Easy Listening 13 Rock 3095<jupyter_text>Le jeu de données contient les données audio sous forme de tableaux :<jupyter_code>audio_array = dataset[0]['audio']['array'] sample_rate_dataset = dataset[0]['audio']['sampling_rate'] print('Audio array shape:', audio_array.shape) print('Sample rate:', sample_rate_dataset) display(Audio(audio_array, rate=sample_rate_dataset))<jupyter_output>Audio array shape: (1323119,) Sample rate: 44100<jupyter_text>Notez que la fréquence d'échantillonnage de cet audio est plus élevée. Si nous voulons utiliser le pipeline existant, nous devrons le "rééchantillonner" pour qu'il corresponde à la fréquence d'échantillonnage. Les clips sont également plus longs que ceux pour lesquels le pipeline est configuré. Heureusement, lorsque nous chargeons l'audio à l'aide de `pipe.mel`, il découpe automatiquement le clip en sections plus petites :<jupyter_code>a = dataset[0]['audio']['array'] # Obtenir le tableau audio pipe.mel.load_audio(raw_audio=a) # Le charger avec pipe.mel pipe.mel.audio_slice_to_image(0) # Visualiser la première "tranche" sous forme de spectrogramme<jupyter_output><empty_output><jupyter_text>Nous devons penser à ajuster le taux d'échantillonnage, car les données de ce jeu de données comportent deux fois plus d'échantillons par seconde :<jupyter_code>sample_rate_dataset = dataset[0]['audio']['sampling_rate'] sample_rate_dataset<jupyter_output><empty_output><jupyter_text>Ici, nous utilisons les transformations de torchaudio (importées sous le nom AT) pour effectuer le rééchantillonnage, le pipeline `mel` pour transformer l'audio en image et les transformations de torchvision (importées sous le nom IT) pour transformer les images en tenseurs. 
Nous obtenons ainsi une fonction qui transforme un clip audio en un tenseur de spectrogramme que nous pouvons utiliser pour nous entraîner :<jupyter_code>resampler = AT.Resample(sample_rate_dataset, sample_rate_pipeline, dtype=torch.float32) to_t = IT.ToTensor() def to_image(audio_array): audio_tensor = torch.tensor(audio_array).to(torch.float32) audio_tensor = resampler(audio_tensor) pipe.mel.load_audio(raw_audio=np.array(audio_tensor)) num_slices = pipe.mel.get_number_of_slices() slice_idx = random.randint(0, num_slices-1) # Piocher une tranche aléatoire à chaque fois (à l'exception de la dernière tranche courte) im = pipe.mel.audio_slice_to_image(slice_idx) return im<jupyter_output><empty_output><jupyter_text>Nous utiliserons notre fonction `to_image()` dans le cadre d'une fonction collate personnalisée pour transformer notre jeu de données en un chargeur de données utilisable pour l'entraînement. La fonction collate définit la manière de transformer un batch d'exemples du jeu de données en un batch final de données prêtes à être entraînées. Dans ce cas, nous transformons chaque échantillon audio en une image de spectrogramme et nous empilons les tenseurs résultants :<jupyter_code>def collate_fn(examples): # vers l'image -> vers le tenseur -> redimensionnement vers (-1, 1) -> empiler dans le batch audio_ims = [to_t(to_image(x['audio']['array']))*2-1 for x in examples] return torch.stack(audio_ims) # Créer un jeu de données avec uniquement le genre de chansons 'Chiptune / Glitch' batch_size=4 # 4 sur Colab, 12 sur A100 chosen_genre = 'Electronic' # <<< Essayer d'entraîner sur des genres différents <<< indexes = [i for i, g in enumerate(dataset['genre']) if g == chosen_genre] filtered_dataset = dataset.select(indexes) dl = torch.utils.data.DataLoader(filtered_dataset.shuffle(), batch_size=batch_size, collate_fn=collate_fn, shuffle=True) batch = next(iter(dl)) print(batch.shape)<jupyter_output>torch.Size([4, 1, 256, 256])<jupyter_text>**NB : Vous devrez utiliser une taille de batch inférieure (par exemple 4) à moins que vous ne disposiez d'une grande quantité de vRAM GPU.** Boucle d'entraînementVoici une boucle d'entraînement simple qui s'exécute à travers le chargeur de données pour quelques époques afin de *finetuner* le pipeline UNet. 
Vous pouvez également ignorer cette cellule et charger le pipeline avec le code de la cellule suivante.<jupyter_code>epochs = 3 lr = 1e-4 pipe.unet.train() pipe.scheduler.set_timesteps(1000) optimizer = torch.optim.AdamW(pipe.unet.parameters(), lr=lr) for epoch in range(epochs): for step, batch in tqdm(enumerate(dl), total=len(dl)): # Préparer les images d'entrée clean_images = batch.to(device) bs = clean_images.shape[0] # Échantillonner un pas de temps aléatoire pour chaque image timesteps = torch.randint( 0, pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device ).long() # Ajouter du bruit aux images propres en fonction de l'ampleur du bruit à chaque étape noise = torch.randn(clean_images.shape).to(clean_images.device) noisy_images = pipe.scheduler.add_noise(clean_images, noise, timesteps) # Obtenir la prédiction du modèle noise_pred = pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Calculer la perte loss = F.mse_loss(noise_pred, noise) loss.backward(loss) # Mise à jour des paramètres du modèle à l'aide de l'optimiseur optimizer.step() optimizer.zero_grad() # OU : Charger la version entraînée précédemment pipe = DiffusionPipeline.from_pretrained("johnowhitaker/Electronic_test").to(device) output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=22050)) # Créer un échantillon plus long en passant un tenseur de bruit de départ avec une forme différente noise = torch.randn(1, 1, pipe.unet.sample_size[0],pipe.unet.sample_size[1]*4).to(device) output = pipe(noise=noise) display(output.images[0]) display(Audio(output.audios[0], rate=22050))<jupyter_output><empty_output><jupyter_text>Ce ne sont pas les résultats les plus impressionnants mais c'est un début :) Essayez d'ajuster le taux d'apprentissage et le nombre d'époques, et partagez vos meilleurs résultats sur Discord pour que nous puissions nous améliorer ensemble ! Quelques éléments à prendre en compte- Nous travaillons avec des images de spectrogrammes carrés de 256 pixels ce qui limite la taille de nos batchs. Pouvez-vous récupérer de l'audio de qualité suffisante à partir d'un spectrogramme de 128x128 ?- Au lieu d'une augmentation aléatoire de l'image, nous choisissons à chaque fois des tranches différentes du clip audio, mais cela pourrait-il être amélioré avec différents types d'augmentation lorsque l'on s'entraîne pendant de nombreuses époques ?- Comment pourrions-nous utiliser cette méthode pour générer des clips plus longs ? Peut-être pourriez-vous générer un clip de départ de 5 secondes, puis utiliser des idées inspirées de l'inpainting pour continuer à générer des segments audio supplémentaires à partir du clip initial...- Quel est l'équivalent d'une image à image dans ce contexte de diffusion de spectrogrammes ? 
Pousser sur le HubUne fois que vous êtes satisfait de votre modèle, vous pouvez le sauvegarder et le transférer sur le Hub pour que d'autres personnes puissent en profiter :<jupyter_code>from huggingface_hub import get_full_repo_name, HfApi, create_repo, ModelCard # Choisir un nom pour le modèle model_name = "audio-diffusion-electronic" hub_model_id = get_full_repo_name(model_name) # Sauvegarder le pipeline localement pipe.save_pretrained(model_name) # Inspecter le contenu du dossier !ls {model_name} # Créer un dépôt create_repo(hub_model_id) # Télécharger les fichiers api = HfApi() api.upload_folder( folder_path=f"{model_name}/scheduler", path_in_repo="scheduler", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{model_name}/mel", path_in_repo="mel", repo_id=hub_model_id ) api.upload_folder(folder_path=f"{model_name}/unet", path_in_repo="unet", repo_id=hub_model_id) api.upload_file( path_or_fileobj=f"{model_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) # Pousser une carte de modèle content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-audio-generation - diffusion-models-class --- # Model Card for Unit 4 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional audio generation of music in the genre {chosen_genre} ## Usage ```python from IPython.display import Audio from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("{hub_model_id}") output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output>
<jupyter_start><jupyter_text>Tout assembler (PyTorch) Installez la bibliothèque 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install transformers[sentencepiece] from transformers import AutoTokenizer checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) sequence = "J'ai attendu un cours d’HuggingFace toute ma vie." model_inputs = tokenizer(sequence) sequence = "J'ai attendu un cours d’HuggingFace toute ma vie." model_inputs = tokenizer(sequence) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] model_inputs = tokenizer(sequences) # Remplit les séquences jusqu'à la longueur maximale de la séquence model_inputs = tokenizer(sequences, padding="longest") # Remplit les séquences jusqu'à la longueur maximale du modèle (512 pour BERT ou DistilBERT) model_inputs = tokenizer(sequences, padding="max_length") # Remplit les séquences jusqu'à la longueur maximale spécifiée model_inputs = tokenizer(sequences, padding="max_length", max_length=8) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] # Tronque les séquences qui sont plus longues que la longueur maximale du modèle # (512 pour BERT ou DistilBERT) model_inputs = tokenizer(sequences, truncation=True) # Tronque les séquences qui sont plus longues que la longueur maximale spécifiée model_inputs = tokenizer(sequences, max_length=8, truncation=True) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] # Retourne des tenseurs PyTorch model_inputs = tokenizer(sequences, padding=True, return_tensors="pt") # Retourne des tenseurs TensorFlow model_inputs = tokenizer(sequences, padding=True, return_tensors="tf") # Retourne des tableaux NumPy model_inputs = tokenizer(sequences, padding=True, return_tensors="np") sequence = "J'ai attendu un cours de HuggingFace toute ma vie." model_inputs = tokenizer(sequence) print(model_inputs["input_ids"]) tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) print(ids) print(tokenizer.decode(model_inputs["input_ids"])) print(tokenizer.decode(ids)) import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint, from_tf=True) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt") output = model(**tokens)<jupyter_output><empty_output>
notebooks/course/fr/chapter2/section6_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter2/section6_pt.ipynb", "repo_id": "notebooks", "token_count": 974 }
142
<jupyter_start><jupyter_text>Création de votre propre jeu de données Installez les bibliothèques 🤗 Transformers et 🤗 Datasets pour exécuter ce *notebook*.<jupyter_code>!pip install datasets evaluate transformers[sentencepiece] !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au *Hub* d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() !pip install requests import requests url = "https://api.github.com/repos/huggingface/datasets/issues?page=1&per_page=1" response = requests.get(url) response.status_code response.json() GITHUB_TOKEN = xxx # Copiez votre jeton GitHub ici headers = {"Authorization": f"token {GITHUB_TOKEN}"} import time import math from pathlib import Path import pandas as pd from tqdm.notebook import tqdm def fetch_issues( owner="huggingface", repo="datasets", num_issues=10_000, rate_limit=5_000, issues_path=Path("."), ): if not issues_path.is_dir(): issues_path.mkdir(exist_ok=True) batch = [] all_issues = [] per_page = 100 # Nombre d'issues à renvoyer par page num_pages = math.ceil(num_issues / per_page) base_url = "https://api.github.com/repos" for page in tqdm(range(num_pages)): # Requête avec state=all pour obtenir les questions ouvertes et fermées query = f"issues?page={page}&per_page={per_page}&state=all" issues = requests.get(f"{base_url}/{owner}/{repo}/{query}", headers=headers) batch.extend(issues.json()) if len(batch) > rate_limit and len(all_issues) < num_issues: all_issues.extend(batch) batch = [] # Vider le batch pour la prochaine période de temps print(f"Reached GitHub rate limit. Sleeping for one hour ...") time.sleep(60 * 60 + 1) all_issues.extend(batch) df = pd.DataFrame.from_records(all_issues) df.to_json(f"{issues_path}/{repo}-issues.jsonl", orient="records", lines=True) print( f"Downloaded all the issues for {repo}! Dataset stored at {issues_path}/{repo}-issues.jsonl" ) # En fonction de votre connexion Internet, l'exécution peut prendre plusieurs minutes... fetch_issues() issues_dataset = load_dataset("json", data_files="datasets-issues.jsonl", split="train") issues_dataset sample = issues_dataset.shuffle(seed=666).select(range(3)) # Afficher l'URL et les entrées de la demande de tirage for url, pr in zip(sample["html_url"], sample["pull_request"]): print(f">> URL: {url}") print(f">> Pull request: {pr}\n") issues_dataset = issues_dataset.map( lambda x: {"is_pull_request": False if x["pull_request"] is None else True} ) issue_number = 2792 url = f"https://api.github.com/repos/huggingface/datasets/issues/{issue_number}/comments" response = requests.get(url, headers=headers) response.json() def get_comments(issue_number): url = f"https://api.github.com/repos/huggingface/datasets/issues/{issue_number}/comments" response = requests.get(url, headers=headers) return [r["body"] for r in response.json()] # Tester notre fonction fonctionne comme prévu get_comments(2792) # Selon votre connexion internet, cela peut prendre quelques minutes... 
issues_with_comments_dataset = issues_dataset.map( lambda x: {"comments": get_comments(x["number"])} ) issues_with_comments_dataset.to_json("datasets-issues-with-comments.jsonl") from huggingface_hub import list_datasets all_datasets = list_datasets() print(f"Number of datasets on Hub: {len(all_datasets)}") print(all_datasets[0]) from huggingface_hub import notebook_login notebook_login() from huggingface_hub import create_repo repo_url = create_repo(name="github-issues", repo_type="dataset") repo_url from huggingface_hub import Repository repo = Repository(local_dir="github-issues", clone_from=repo_url) !cp datasets-issues-with-comments.jsonl github-issues/ repo.lfs_track("*.jsonl") repo.push_to_hub() from datasets import load_dataset remote_dataset = load_dataset("lewtun/github-issues", split="train") remote_dataset<jupyter_output><empty_output>
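The notebook builds an `is_pull_request` flag and a `comments` column but stops short of filtering with them. A possible clean-up sketch (an assumption about how one might use those columns before pushing, reusing the file name saved above):

```python
from datasets import load_dataset

# Reload the exported file, then drop pull requests and issues with no comments —
# a typical filtering step before using the corpus for search or training.
dataset = load_dataset("json", data_files="datasets-issues-with-comments.jsonl", split="train")
dataset = dataset.filter(lambda x: (not x["is_pull_request"]) and len(x["comments"]) > 0)
print(dataset)
```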
notebooks/course/fr/chapter5/section5.ipynb/0
{ "file_path": "notebooks/course/fr/chapter5/section5.ipynb", "repo_id": "notebooks", "token_count": 1679 }
143
<jupyter_start><jupyter_text>Finetuner un modèle de language masqué (PyTorch) Installez les bibliothèques 🤗 *Datasets*, 🤗 *Transformers* et 🤗 *Accelerate* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install accelerate # Pour exécuter l'entraînement sur TPU, vous devez décommenter la ligne suivante : # !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from transformers import AutoModelForMaskedLM model_checkpoint = "camembert-base" model = AutoModelForMaskedLM.from_pretrained(model_checkpoint) text = "C'est une grande <mask>." from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) import torch inputs = tokenizer(text, return_tensors="pt") token_logits = model(**inputs).logits # Trouver l'emplacement du <mask> et extraire ses logits mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] mask_token_logits = token_logits[0, mask_token_index, :] # Choisir les <mask> candidats avec les logits les plus élevés top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist() for token in top_5_tokens: print(f"'>>> {text.replace(tokenizer.mask_token, tokenizer.decode([token]))}'") from datasets import load_dataset imdb_dataset = load_dataset("allocine") imdb_dataset sample = imdb_dataset["train"].shuffle(seed=42).select(range(3)) for row in sample: print(f"\n'>>> Review: {row['review']}'") print(f"'>>> Label: {row['label']}'") def tokenize_function(examples): result = tokenizer(examples["review"]) if tokenizer.is_fast: result["word_ids"] = [result.word_ids(i) for i in range(len(result["input_ids"]))] return result # Utilisez batched=True pour activer le multithreading rapide ! 
tokenized_datasets = imdb_dataset.map( tokenize_function, batched=True, remove_columns=["review", "label"] ) tokenized_datasets tokenizer.model_max_length chunk_size = 128 # Le découpage produit une liste de listes pour chaque caractéristique tokenized_samples = tokenized_datasets["train"][:3] for idx, sample in enumerate(tokenized_samples["input_ids"]): print(f"'>>> Review {idx} length: {len(sample)}'") concatenated_examples = { k: sum(tokenized_samples[k], []) for k in tokenized_samples.keys() } total_length = len(concatenated_examples["input_ids"]) print(f"'>>> Concatenated reviews length: {total_length}'") chunks = { k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)] for k, t in concatenated_examples.items() } for chunk in chunks["input_ids"]: print(f"'>>> Chunk length: {len(chunk)}'") def group_texts(examples): # Concaténation de tous les textes concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} # Calculer la longueur des textes concaténés total_length = len(concatenated_examples[list(examples.keys())[0]]) # Nous laissons tomber le dernier morceau s'il est plus petit que chunk_size total_length = (total_length // chunk_size) * chunk_size # Fractionnement par morceaux de max_len result = { k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)] for k, t in concatenated_examples.items() } # Créer une nouvelle colonne d'étiquettes result["labels"] = result["input_ids"].copy() return result lm_datasets = tokenized_datasets.map(group_texts, batched=True) lm_datasets tokenizer.decode(lm_datasets["train"][1]["input_ids"]) from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15) samples = [lm_datasets["train"][i] for i in range(2)] for sample in samples: _ = sample.pop("word_ids") for chunk in data_collator(samples)["input_ids"]: print(f"\n'>>> {tokenizer.decode(chunk)}'") import collections import numpy as np from transformers import default_data_collator wwm_probability = 0.2 def whole_word_masking_data_collator(features): for feature in features: word_ids = feature.pop("word_ids") # Création d'une correspondance entre les mots et les indices des tokens correspondants mapping = collections.defaultdict(list) current_word_index = -1 current_word = None for idx, word_id in enumerate(word_ids): if word_id is not None: if word_id != current_word: current_word = word_id current_word_index += 1 mapping[current_word_index].append(idx) # Masquer des mots de façon aléatoire mask = np.random.binomial(1, wwm_probability, (len(mapping),)) input_ids = feature["input_ids"] labels = feature["labels"] new_labels = [-100] * len(labels) for word_id in np.where(mask)[0]: word_id = word_id.item() for idx in mapping[word_id]: new_labels[idx] = labels[idx] input_ids[idx] = tokenizer.mask_token_id feature["labels"] = new_labels return default_data_collator(features) samples = [lm_datasets["train"][i] for i in range(2)] batch = whole_word_masking_data_collator(samples) for chunk in batch["input_ids"]: print(f"\n'>>> {tokenizer.decode(chunk)}'") train_size = 10_000 test_size = int(0.1 * train_size) downsampled_dataset = lm_datasets["train"].train_test_split( train_size=train_size, test_size=test_size, seed=42 ) downsampled_dataset from transformers import TrainingArguments batch_size = 64 # Montrer la perte d'entraînement à chaque époque logging_steps = len(downsampled_dataset["train"]) // batch_size model_name = model_checkpoint.split("/")[-1] training_args = 
TrainingArguments( output_dir=f"{model_name}-finetuned-allocine", overwrite_output_dir=True, evaluation_strategy="epoch", learning_rate=2e-5, weight_decay=0.01, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, push_to_hub=True, fp16=True, logging_steps=logging_steps, ) from transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=downsampled_dataset["train"], eval_dataset=downsampled_dataset["test"], data_collator=data_collator, tokenizer=tokenizer, ) import math eval_results = trainer.evaluate() print(f">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}") trainer.train() eval_results = trainer.evaluate() print(f">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}") trainer.push_to_hub() def insert_random_mask(batch): features = [dict(zip(batch, t)) for t in zip(*batch.values())] masked_inputs = data_collator(features) # Créer une nouvelle colonne "masquée" pour chaque colonne du jeu de données return {"masked_" + k: v.numpy() for k, v in masked_inputs.items()} downsampled_dataset = downsampled_dataset.remove_columns(["word_ids"]) eval_dataset = downsampled_dataset["test"].map( insert_random_mask, batched=True, remove_columns=downsampled_dataset["test"].column_names, ) eval_dataset = eval_dataset.rename_columns( { "masked_input_ids": "input_ids", "masked_attention_mask": "attention_mask", "masked_labels": "labels", } ) from torch.utils.data import DataLoader from transformers import default_data_collator batch_size = 64 train_dataloader = DataLoader( downsampled_dataset["train"], shuffle=True, batch_size=batch_size, collate_fn=data_collator, ) eval_dataloader = DataLoader( eval_dataset, batch_size=batch_size, collate_fn=default_data_collator ) from torch.optim import AdamW optimizer = AdamW(model.parameters(), lr=5e-5) from accelerate import Accelerator accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) from transformers import get_scheduler num_train_epochs = 3 num_update_steps_per_epoch = len(train_dataloader) num_training_steps = num_train_epochs * num_update_steps_per_epoch lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps, ) from huggingface_hub import get_full_repo_name model_name = "camembert-base-finetuned-allocine-accelerate" repo_name = get_full_repo_name(model_name) repo_name from huggingface_hub import Repository output_dir = model_name repo = Repository(output_dir, clone_from=repo_name) from tqdm.auto import tqdm import torch import math progress_bar = tqdm(range(num_training_steps)) for epoch in range(num_train_epochs): # Entraînement model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) # Evaluation model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather(loss.repeat(batch_size))) losses = torch.cat(losses) losses = losses[: len(eval_dataset)] try: perplexity = math.exp(torch.mean(losses)) except OverflowError: perplexity = float("inf") print(f">>> Epoch {epoch}: Perplexity: {perplexity}") # Sauvegarder et télécharger accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(output_dir, 
save_function=accelerator.save) if accelerator.is_main_process: tokenizer.save_pretrained(output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False ) from transformers import pipeline mask_filler = pipeline( "fill-mask", model="huggingface-course/camembert-base-finetuned-allocine", tokenizer="huggingface-course/camembert-base-finetuned-allocine", ) preds = mask_filler(text) for pred in preds: print(f">>> {pred['sequence']}")<jupyter_output><empty_output>
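The notebook defines `whole_word_masking_data_collator` but trains with the token-level `DataCollatorForLanguageModeling`. A hedged sketch of how the whole-word collator could be plugged into the `Trainer` instead, assuming a version of `downsampled_dataset` that still contains the `word_ids` column (the Trainer must also be told not to drop that column):

```python
from transformers import Trainer, TrainingArguments

# Keep "word_ids" in the batches so the collator can pop it; by default the Trainer
# removes columns that are not model inputs, which would break this collator.
wwm_args = TrainingArguments(
    output_dir=f"{model_name}-finetuned-allocine-wwm",
    remove_unused_columns=False,
    per_device_train_batch_size=batch_size,
    num_train_epochs=1,
)

wwm_trainer = Trainer(
    model=model,
    args=wwm_args,
    train_dataset=downsampled_dataset["train"],
    eval_dataset=downsampled_dataset["test"],
    data_collator=whole_word_masking_data_collator,
    tokenizer=tokenizer,
)
```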
notebooks/course/fr/chapter7/section3_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section3_pt.ipynb", "repo_id": "notebooks", "token_count": 4290 }
144
<jupyter_start><jupyter_text>Construire votre première démo Installez les bibliothèques 🤗 Transformers et 🤗 Gradio pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install gradio import gradio as gr def greet(name): return "Bonjour " + name demo = gr.Interface(fn=greet, inputs="text", outputs="text") demo.launch() import gradio as gr def greet(name): return "Bonjour " + name # Nous instancions la classe Textbox textbox = gr.Textbox(label="Tapez votre nom ici :", placeholder="Marie Martin", lines=2) gr.Interface(fn=greet, inputs=textbox, outputs="text").launch() from transformers import pipeline model = pipeline("text-generation", model="asi/gpt-fr-cased-small") def predict(prompt): completion = model(prompt)[0]["generated_text"] return completion import gradio as gr gr.Interface(fn=predict, inputs="text", outputs="text").launch()<jupyter_output><empty_output>
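As a small extension sketch (not in the original notebook), the same text-generation pipeline can be wrapped with an extra input so users control the generation length; this reuses the `model` pipeline defined above and assumes a Gradio version that provides `gr.Slider`:

```python
import gradio as gr

def predict_with_length(prompt, max_length):
    # max_length is forwarded to the underlying text-generation pipeline
    completion = model(prompt, max_length=int(max_length))[0]["generated_text"]
    return completion

gr.Interface(
    fn=predict_with_length,
    inputs=[
        gr.Textbox(label="prompt"),
        gr.Slider(minimum=20, maximum=200, value=50, label="max_length"),
    ],
    outputs="text",
).launch()
```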
notebooks/course/fr/chapter9/section2.ipynb/0
{ "file_path": "notebooks/course/fr/chapter9/section2.ipynb", "repo_id": "notebooks", "token_count": 326 }
145
<jupyter_start><jupyter_text>In-painting pipeline for Stable Diffusion using 🧨 Diffusers This notebook shows how to do text-guided in-painting with Stable Diffusion model using 🤗 Hugging Face [🧨 Diffusers library](https://github.com/huggingface/diffusers). For a general introduction to the Stable Diffusion model please refer to this [colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb).<jupyter_code>!pip install -qq -U diffusers==0.11.1 transformers ftfy gradio accelerate<jupyter_output><empty_output><jupyter_text>To use private and gated models on 🤗 Hugging Face Hub, login is required. If you are only using a public checkpoint (such as `runwayml/stable-diffusion-inpainting` in this notebook), you can skip this step.<jupyter_code>from huggingface_hub import notebook_login notebook_login() import inspect from typing import List, Optional, Union import numpy as np import torch import PIL import gradio as gr from diffusers import StableDiffusionInpaintPipeline device = "cuda" model_path = "runwayml/stable-diffusion-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained( model_path, torch_dtype=torch.float16, ).to(device) import requests from io import BytesIO def image_grid(imgs, rows, cols): assert len(imgs) == rows*cols w, h = imgs[0].size grid = PIL.Image.new('RGB', size=(cols*w, rows*h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h)) return grid def download_image(url): response = requests.get(url) return PIL.Image.open(BytesIO(response.content)).convert("RGB") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" image = download_image(img_url).resize((512, 512)) image mask_image = download_image(mask_url).resize((512, 512)) mask_image prompt = "a mecha robot sitting on a bench" guidance_scale=7.5 num_samples = 3 generator = torch.Generator(device="cuda").manual_seed(0) # change the seed to get different results images = pipe( prompt=prompt, image=image, mask_image=mask_image, guidance_scale=guidance_scale, generator=generator, num_images_per_prompt=num_samples, ).images # insert initial image in the list so we can compare side by side images.insert(0, image) image_grid(images, 1, num_samples + 1)<jupyter_output><empty_output><jupyter_text>Gradio Demo<jupyter_code>def predict(dict, prompt): image = dict['image'].convert("RGB").resize((512, 512)) mask_image = dict['mask'].convert("RGB").resize((512, 512)) images = pipe(prompt=prompt, image=image, mask_image=mask_image).images return(images[0]) gr.Interface( predict, title = 'Stable Diffusion In-Painting', inputs=[ gr.Image(source = 'upload', tool = 'sketch', type = 'pil'), gr.Textbox(label = 'prompt') ], outputs = [ gr.Image() ] ).launch(debug=True)<jupyter_output>Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch(). Running on public URL: https://e52f060882d60b09.gradio.app This share link expires in 72 hours. For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces
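The Gradio `predict` function above hard-codes the pipeline defaults. A hypothetical variant (an addition, using only arguments the `StableDiffusionInpaintPipeline` call already accepts) exposes the seed, step count and guidance scale:

```python
import torch

def predict_seeded(dict, prompt, seed=0, steps=50, guidance=7.5):
    # Same image/mask preparation as the original predict()
    image = dict["image"].convert("RGB").resize((512, 512))
    mask_image = dict["mask"].convert("RGB").resize((512, 512))
    generator = torch.Generator(device=device).manual_seed(int(seed))
    result = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask_image,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        generator=generator,
    ).images[0]
    return result
```

This can be dropped into the `gr.Interface` above in place of `predict`, with matching extra inputs (for example `gr.Number` or `gr.Slider` components).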
notebooks/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb/0
{ "file_path": "notebooks/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb", "repo_id": "notebooks", "token_count": 1254 }
146
# IDEFICS Demos/examples ## Inference - [Normal inference](inference.py) (needs ~20GB GPU memory) - [4bit quantized inference](inference_4bit.py) (needs ~7GB GPU memory) ## Finetuning The following demos use the Image captioning task: - [PEFT (LORA) finetuning (notebook)](finetune_image_captioning_peft.ipynb) (fits on Google colab) - [Normal finetuning](finetune_image_captioning.py) (needs ~40GB GPU memory)
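For orientation, here is a rough sketch of what the 4-bit path typically looks like with `transformers` and `bitsandbytes`; the checkpoint name and quantization settings are assumptions, and `inference_4bit.py` remains the maintained reference.

```python
import torch
from transformers import IdeficsForVisionText2Text, AutoProcessor, BitsAndBytesConfig

checkpoint = "HuggingFaceM4/idefics-9b"  # assumed checkpoint name

# 4-bit weights with fp16 compute, which is what brings memory use down to a few GB
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

processor = AutoProcessor.from_pretrained(checkpoint)
model = IdeficsForVisionText2Text.from_pretrained(
    checkpoint,
    quantization_config=quant_config,
    device_map="auto",
)
```

Actual memory use depends on the checkpoint size and the bitsandbytes version, so treat the figures above as indicative.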
notebooks/examples/idefics/README.md/0
{ "file_path": "notebooks/examples/idefics/README.md", "repo_id": "notebooks", "token_count": 148 }
147
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers as well as some other libraries. Uncomment the following cell and run it.<jupyter_code># Install !pip install -q biopython transformers datasets huggingface_hub accelerate<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to login to the huggingface hub<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code>!apt install git-lfs<jupyter_output>Reading package lists... Done Building dependency tree Reading state information... Done git-lfs is already the newest version (2.9.2-1). 0 upgraded, 0 newly installed, 0 to remove and 16 not upgraded.<jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("nucleotide_transformer_dna_sequence_modeling_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>**Fine-Tuning the Nucleotide-transformer** The **Nucleotide Transformer** paper [Dalla-torre et al, 2023](https://www.biorxiv.org/content/10.1101/2023.01.11.523679v2) introduces 4 genomics foundational models developed by **InstaDeep**. These transformers, of various sizes and trained on different datasets, allow powerful representations of DNA sequences that allow to tackle a very diverse set of problems such as chromatin accessibility, deleteriousness prediction, promoter and enhancer prediction etc... These representations can be extracted from the transformer and used as proxies of the DNA sequences (this is called probing) or the transformer can be trained further on a specific task (this is called finetuning). This notebook allows you to fine-tune these models.The model we are going to use is the [500M Human Ref model](https://huggingface.co/InstaDeepAI/nucleotide-transformer-500m-1000g), which is a 500M parameters transformer pre-trained on the human reference genome, per the training methodology presented in the Nucleotide Transformer Paper. It is one of the 4 models introduced, all available on the [Instadeep HuggingFace page](https://huggingface.co/InstaDeepAI):```| Model name | Num layers | Num parameters | Training dataset ||---------------------|------------|----------------|------------------------|| `500M Human Ref` | 24 | 500M | Human reference genome || `500M 1000G` | 24 | 500M | 1000G genomes || `2.5B 1000G` | 32 | 2.5B | 1000G genomes || `2.5B Multispecies` | 32 | 2.5B | Multi-species dataset |```Note that using the larger models will require more GPU memory and produce longer finetuning timesIn the following, we showcase the nucleotide transformer ability to classify genomic sequences as two of the most basic genomic motifs: **promoters** and **enhancers types**. 
Both of them are classification task, but the enhancers types task is much more challenging with its 3 classes.These two tasks are still very basic, but the nucleotide transformers have been shown to beat/match state of the art models on much more complex tasks such as [DeepSEA](https://www.nature.com/articles/nmeth.3547), which, given a DNA sequence, predicts 919 chromatin profiles from a diverse set of human cells and tissues from a single sequence or [DeepSTARR](https://www.nature.com/articles/s41588-022-01048-5), which predicts an enhancer's activity. **Importing required packages** **Import and install**<jupyter_code># Imports from transformers import AutoTokenizer, TrainingArguments, Trainer, AutoModelForSequenceClassification import torch from sklearn.metrics import matthews_corrcoef, f1_score from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np # Define the working device device = torch.device("cuda")<jupyter_output><empty_output><jupyter_text>**Prepare and create the model for fine-tuning** The nucleotide transformer will be fine-tuned on two **classification tasks**: **promoter** and **enhancer types** classification.The `AutoModelForSequenceClassification` module automatically loads the model and adds a simple classification head on top of the final embeddings. **First task : Promoter prediction** Promoter prediction is a **sequence classification** problem, in which the DNA sequence is predicted to be either a promoter or not.A promoter is a region of DNA where transcription of a gene is initiated. Promoters are a vital component of expression vectors because they control the binding of RNA polymerase to DNA. RNA polymerase transcribes DNA to mRNA which is ultimately translated into a functional protein This task was introduced in [DeePromoter](https://www.frontiersin.org/articles/10.3389/fgene.2019.00286/full), where a set of TATA and non-TATA promoters was gathered. A negative sequence was generated from each promoter, by randomly sampling subsets of the sequence, to guarantee that some obvious motifs were present both in the positive and negative dataset.<jupyter_code>num_labels_promoter = 2 # Load the model model = AutoModelForSequenceClassification.from_pretrained("InstaDeepAI/nucleotide-transformer-500m-human-ref", num_labels=num_labels_promoter) model = model.to(device)<jupyter_output><empty_output><jupyter_text>**Dataset loading and preparation**<jupyter_code>from datasets import load_dataset, Dataset # Load the promoter dataset from the InstaDeep Hugging Face ressources dataset_name = "promoter_all" train_dataset_promoter = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="train", streaming= False, ) test_dataset_promoter = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="test", streaming= False, ) # Get training data train_sequences_promoter = train_dataset_promoter['sequence'] train_labels_promoter = train_dataset_promoter['label'] # Split the dataset into a training and a validation dataset train_sequences_promoter, validation_sequences_promoter, train_labels_promoter, validation_labels_promoter = train_test_split(train_sequences_promoter, train_labels_promoter, test_size=0.05, random_state=42) # Get test data test_sequences_promoter = test_dataset_promoter['sequence'] test_labels_promoter = test_dataset_promoter['label']<jupyter_output><empty_output><jupyter_text>Let us have a look at the data. 
If we extract the last sequence of the dataset, we see that it is indeed a promoter, as its label is 1. Furthermore, we can also see that it is a TATA promoter, as the TATA motif is present at the 221th nucleotide of the sequence!<jupyter_code>idx_sequence = -1 sequence, label = train_sequences_promoter[idx_sequence], train_labels_promoter[idx_sequence] print(f"The DNA sequence is {sequence}.") print(f"Its associated label is label {label}.") idx_TATA = sequence.find("TATA") print(f"This promoter is a TATA promoter, as the TATA motif is present at the {idx_TATA}th nucleotide.")<jupyter_output>The DNA sequence is CACACCAGACAAAATTTGGTTAATTTGCGCCCAATATTCATTACTTTGACCTAACCTTTGTTCTGAAGGCCGTGTACAAGGACAAGGCCCTGAGATTATTGCAACAGTAACTTGAAAAACTTTCAGAAGTCTATTCTGTAGGATTAAAGGAATGCTGAGACTATTCAAGTTTGAAGTCCTGGGGGTGGGGAAAAATAAAAAACCTGTGCTAGAAAGCTTAGTATAGCATGTAACTTTAGAGTCCTGTGGAGTCCTGAGTCTCCCACAGACCAGAACAGTCATTTAAAAGTTTTCAGGAAA. Its associated label is label 1. This promoter is a TATA promoter, as the TATA motif is present at the 221th nucleotide.<jupyter_text>**Tokenizing the datasets** All inputs to neural nets must be numerical. The process of converting strings into numerical indices suitable for a neural net is called **tokenization**.<jupyter_code># Load the tokenizer tokenizer = AutoTokenizer.from_pretrained("InstaDeepAI/nucleotide-transformer-500m-human-ref") # Promoter dataset ds_train_promoter = Dataset.from_dict({"data": train_sequences_promoter,'labels':train_labels_promoter}) ds_validation_promoter = Dataset.from_dict({"data": validation_sequences_promoter,'labels':validation_labels_promoter}) ds_test_promoter = Dataset.from_dict({"data": test_sequences_promoter,'labels':test_labels_promoter}) def tokenize_function(examples): outputs = tokenizer(examples["data"]) return outputs # Creating tokenized promoter dataset tokenized_datasets_train_promoter = ds_train_promoter.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_validation_promoter = ds_validation_promoter.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_test_promoter = ds_test_promoter.map( tokenize_function, batched=True, remove_columns=["data"], )<jupyter_output><empty_output><jupyter_text>**Fine-tuning and evaluation** The hyper-parameters introduced here are different from the ones used in the paper since we are training the whole model. Further hyper-parameters search will surely improve the performance on the task!.We initialize our `TrainingArguments`. These control the various training hyperparameters, and will be passed to our `Trainer`.<jupyter_code>batch_size = 8 model_name='nucleotide-transformer' args_promoter = TrainingArguments( f"{model_name}-finetuned-NucleotideTransformer", remove_unused_columns=False, evaluation_strategy="steps", save_strategy="steps", learning_rate=1e-5, per_device_train_batch_size=batch_size, gradient_accumulation_steps= 1, per_device_eval_batch_size= 64, num_train_epochs= 2, logging_steps= 100, load_best_model_at_end=True, # Keep the best model according to the evaluation metric_for_best_model="f1_score", label_names=["labels"], dataloader_drop_last=True, max_steps= 1000 )<jupyter_output><empty_output><jupyter_text>Next, we define the metric we will use to evaluate our models and write a `compute_metrics` function. 
We can load this from the `scikit-learn` library.<jupyter_code># Define the metric for the evaluation using the f1 score def compute_metrics_f1_score(eval_pred): """Computes F1 score for binary classification""" predictions = np.argmax(eval_pred.predictions, axis=-1) references = eval_pred.label_ids r={'f1_score': f1_score(references, predictions)} return r trainer = Trainer( model.to(device), args_promoter, train_dataset= tokenized_datasets_train_promoter, eval_dataset= tokenized_datasets_validation_promoter, tokenizer=tokenizer, compute_metrics=compute_metrics_f1_score, )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>train_results = trainer.train()<jupyter_output>/usr/local/lib/python3.10/dist-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning warnings.warn(<jupyter_text>Note that the finetuning is done with a small batch size (8). The training time can be reduced by increasing the batch size, as it leverages parallelism in the GPU. **Validation F1 score**<jupyter_code>curve_evaluation_f1_score =[[a['step'],a['eval_f1_score']] for a in trainer.state.log_history if 'eval_f1_score' in a.keys()] eval_f1_score = [c[1] for c in curve_evaluation_f1_score] steps = [c[0] for c in curve_evaluation_f1_score] plt.plot(steps, eval_f1_score, 'b', label='Validation F1 score') plt.title('Validation F1 score for promoter prediction') plt.xlabel('Number of training steps performed') plt.ylabel('Validation F1 score') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>**F1 score on the test dataset**<jupyter_code># Compute the F1 score on the test dataset: print(f"F1 score on the test dataset: {trainer.predict(tokenized_datasets_test_promoter).metrics['test_f1_score']}")<jupyter_output><empty_output><jupyter_text>For the promoter prediction task, we obtain a performance that is already close to the one displayed in the [**article**](https://www.biorxiv.org/content/10.1101/2023.01.11.523679v1.full.pdf) by training on only 1000 steps. An F1 score of **0.938** is obtained after just **1000 training steps**. To get closer to the **0.954** score obtained in the nucleotide transformer paper after 10,000 training steps, we surely need to train for longer! **Second task: Enhancer type prediction** In this section, we fine-tune the nucleotide transformer model on **enhancer type prediction**, which consists of classifying a DNA sequence as **strong**, **weak** or **non enhancer**. In genetics, an enhancer is a short (50–1500 bp) region of DNA that can be bound by proteins (activators) to increase the likelihood that transcription of a particular gene will occur. [A deep learning framework for enhancer prediction using word embedding and sequence generation](https://www.sciencedirect.com/science/article/abs/pii/S0301462222000643) introduced the dataset used here by augmenting an original set of enhancers with 6000 synthetic enhancers and 6000 synthetic non-enhancers produced through a generative model. 
Model<jupyter_code>num_labels_enhancers_types = 3 # Load the model model = AutoModelForSequenceClassification.from_pretrained("InstaDeepAI/nucleotide-transformer-500m-human-ref", num_labels=num_labels_enhancers_types) model = model.to(device)<jupyter_output>Some weights of the model checkpoint at InstaDeepAI/nucleotide-transformer-500m-human-ref were not used when initializing EsmForSequenceClassification: ['lm_head.layer_norm.weight', 'lm_head.layer_norm.bias', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.decoder.weight', 'lm_head.bias'] - This IS expected if you are initializing EsmForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing EsmForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of EsmForSequenceClassification were not initialized from the model checkpoint at InstaDeepAI/nucleotide-transformer-500m-human-ref and are newly initialized: ['classifier.out_proj.bias', 'classifier[...]<jupyter_text>**Dataset loading and preparation**<jupyter_code>from datasets import load_dataset, Dataset # Load the enhancers dataset from the InstaDeep Hugging Face ressources dataset_name = "enhancers_types" train_dataset_enhancers = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="train", streaming= False, ) test_dataset_enhancers = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="test", streaming= False, ) # Get training data train_sequences_enhancers = train_dataset_enhancers['sequence'] train_labels_enhancers = train_dataset_enhancers['label'] # Split the dataset into a training and a validation dataset train_sequences_enhancers, validation_sequences_enhancers, train_labels_enhancers, validation_labels_enhancers = train_test_split(train_sequences_enhancers, train_labels_enhancers, test_size=0.10, random_state=42) # Get test data test_sequences_enhancers = test_dataset_enhancers['sequence'] test_labels_enhancers = test_dataset_enhancers['label']<jupyter_output><empty_output><jupyter_text>**Tokenizing the datasets**<jupyter_code># Enhancer dataset ds_train_enhancers = Dataset.from_dict({"data": train_sequences_enhancers,'labels':train_labels_enhancers}) ds_validation_enhancers = Dataset.from_dict({"data": validation_sequences_enhancers,'labels':validation_labels_enhancers}) ds_test_enhancers = Dataset.from_dict({"data": test_sequences_enhancers,'labels':test_labels_enhancers}) # Creating tokenized enhancer dataset tokenized_datasets_train_enhancers = ds_train_enhancers.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_validation_enhancers = ds_validation_enhancers.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_test_enhancers = ds_test_enhancers.map( tokenize_function, batched=True, remove_columns=["data"], )<jupyter_output><empty_output><jupyter_text>**Fine-tuning and evaluation** As with the promoters task, the hyper-parameters introduced here are different from the ones used in the paper since we are training the whole model. Further hyper-parameters search will surely improve the performance on the task!.We initialize our `TrainingArguments`. 
These control the various training hyperparameters, and will be passed to our `Trainer`.<jupyter_code>batch_size = 8 model_name='nucleotide-transformer' args_enhancers = TrainingArguments( f"{model_name}-finetuned-NucleotideTransformer", remove_unused_columns=False, evaluation_strategy="steps", save_strategy="steps", learning_rate=1e-5, per_device_train_batch_size=batch_size, gradient_accumulation_steps= 1, per_device_eval_batch_size= 64, num_train_epochs= 2, logging_steps= 100, load_best_model_at_end=True, # Keep the best model according to the evaluation metric_for_best_model="mcc_score", # The mcc_score on the evaluation dataset used to select the best model label_names=["labels"], dataloader_drop_last=True, max_steps= 1000 )<jupyter_output><empty_output><jupyter_text>Here, the metric used to evaluate the model is the Matthews Correlation Coefficient, which is more relevant than the accuracy when the classes in the dataset are unbalanced. We can load a predefined function from the `scikit-learn` library.<jupyter_code># Define the metric for the evaluation def compute_metrics_mcc(eval_pred): """Computes Matthews correlation coefficient (MCC score) for binary classification""" predictions = np.argmax(eval_pred.predictions, axis=-1) references = eval_pred.label_ids r={'mcc_score': matthews_corrcoef(references, predictions)} return r trainer = Trainer( model, args_enhancers, train_dataset= tokenized_datasets_train_enhancers, eval_dataset= tokenized_datasets_validation_enhancers, tokenizer=tokenizer, compute_metrics=compute_metrics_mcc, )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>train_results = trainer.train()<jupyter_output>/usr/local/lib/python3.10/dist-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning warnings.warn(<jupyter_text>As with the first task, the time can be greatly reduced by increasing the batch size. **Validation MCC score**<jupyter_code>curve_evaluation_mcc_score=[[a['step'],a['eval_mcc_score']] for a in trainer.state.log_history if 'eval_mcc_score' in a.keys()] eval_mcc_score = [c[1] for c in curve_evaluation_mcc_score] steps = [c[0] for c in curve_evaluation_mcc_score] plt.plot(steps, eval_mcc_score, 'b', label='Validation MCC score') plt.title('Validation MCC score for enhancer prediction') plt.xlabel('Number of training steps performed') plt.ylabel('Validation MCC score') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>**MCC on the test dataset**<jupyter_code># Compute the MCC score on the test dataset : print(f"MCC score on the test dataset: {trainer.predict(tokenized_datasets_test_enhancers).metrics['test_mcc_score']}")<jupyter_output><empty_output>
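The notebook stops after reporting metrics. A short inference sketch (an addition, reusing the `model`, `tokenizer` and `device` objects defined above; the DNA string is a dummy example) shows how the fine-tuned classifier can be applied to a new sequence:

```python
import torch

# Dummy DNA sequence for illustration only
sequence = "ATGCGTACGTTAGC" * 10

inputs = tokenizer(sequence, return_tensors="pt").to(device)
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring class index (0, 1 or 2 for the enhancer-type task)
predicted_class = int(torch.argmax(logits, dim=-1))
print(f"Predicted class id: {predicted_class}")
```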
notebooks/examples/nucleotide_transformer_dna_sequence_modelling.ipynb/0
{ "file_path": "notebooks/examples/nucleotide_transformer_dna_sequence_modelling.ipynb", "repo_id": "notebooks", "token_count": 6637 }
148
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets as well as other dependencies. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets evaluate transformers rouge-score nltk<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("summarization_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a summarization task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model for a summarization task. We will use the [XSum dataset](https://arxiv.org/pdf/1808.08745.pdf) (for extreme summarization) which contains BBC articles accompanied with single-sentence summaries.We will see how to easily load the dataset for this task using 🤗 Datasets and how to fine-tune a model on it using the `Trainer` API.<jupyter_code>model_checkpoint = "t5-small"<jupyter_output><empty_output><jupyter_text>This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`t5-small`](https://huggingface.co/t5-small) checkpoint. Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). 
This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset from evaluate import load raw_datasets = load_dataset("xsum") metric = load("rouge")<jupyter_output>2023-06-06 12:43:46.062773: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. Found cached dataset xsum (/home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71)<jupyter_text>The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set:<jupyter_code>raw_datasets<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>raw_datasets["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=5): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, datasets.ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(raw_datasets["train"])<jupyter_output><empty_output><jupyter_text>The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Metric):<jupyter_code>metric<jupyter_output><empty_output><jupyter_text>You can call its `compute` method with your predictions and labels, which need to be list of decoded strings:<jupyter_code>fake_preds = ["hello there", "general kenobi"] fake_labels = ["hello there", "general kenobi"] metric.compute(predictions=fake_preds, references=fake_labels)<jupyter_output><empty_output><jupyter_text>Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that the model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>By default, the call above will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. 
You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this one sentence!")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. Instead of one sentence, we can pass along a list of sentences:<jupyter_code>tokenizer(["Hello, this one sentence!", "This is another sentence."])<jupyter_output><empty_output><jupyter_text>To prepare the targets for our model, we need to tokenize them using the `text_target` parameter. This will make sure the tokenizer uses the special tokens corresponding to the targets:<jupyter_code>print(tokenizer(text_target=["Hello, this one sentence!", "This is another sentence."]))<jupyter_output>{'input_ids': [[8774, 6, 48, 80, 7142, 55, 1], [100, 19, 430, 7142, 5, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]}<jupyter_text>If you are using one of the five T5 checkpoints, we have to prefix the inputs with "summarize:" (the model can also translate and it needs the prefix to know which task it has to perform).<jupyter_code>if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]: prefix = "summarize: " else: prefix = ""<jupyter_output><empty_output><jupyter_text>We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer than what the selected model can handle will be truncated to the maximum length accepted by the model. The padding will be dealt with later on (in a data collator) so we pad examples to the longest length in the batch and not the whole dataset.<jupyter_code>max_input_length = 1024 max_target_length = 128 def preprocess_function(examples): inputs = [prefix + doc for doc in examples["document"]] model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True) # Setup the tokenizer for targets labels = tokenizer(text_target=examples["summary"], max_length=max_target_length, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>preprocess_function(raw_datasets['train'][:2])<jupyter_output><empty_output><jupyter_text>To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier. 
This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71/cache-c29884c8f95fd0b3.arrow WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71/cache-c7c633359ea6092f.arrow WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71/cache-e1e5d997fb514ab9.arrow<jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `AutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.<jupyter_code>from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case. To instantiate a `Seq2SeqTrainer`, we will need to define three more things. The most important is the [`Seq2SeqTrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.Seq2SeqTrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>batch_size = 16 model_name = model_checkpoint.split("/")[-1] args = Seq2SeqTrainingArguments( f"{model_name}-finetuned-xsum", evaluation_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, weight_decay=0.01, save_total_limit=3, num_train_epochs=1, predict_with_generate=True, fp16=True, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the cell and customize the weight decay. 
Since the `Seq2SeqTrainer` will save the model regularly and our dataset is quite large, we tell it to make three saves maximum. Lastly, we use the `predict_with_generate` option (to properly generate summaries) and activate mixed precision training (to go a bit faster).The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/t5-finetuned-xsum"` or `"huggingface/t5-finetuned-xsum"`).Then, we need a special kind of data collator, which will not only pad the inputs to the maximum length in the batch, but also the labels:<jupyter_code>data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)<jupyter_output><empty_output><jupyter_text>The last thing to define for our `Seq2SeqTrainer` is how to compute the metrics from the predictions. We need to define a function for this, which will just use the `metric` we loaded earlier, and we have to do a bit of pre-processing to decode the predictions into texts:<jupyter_code>import nltk import numpy as np def compute_metrics(eval_pred): predictions, labels = eval_pred decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) # Replace -100 in the labels as we can't decode them. labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Rouge expects a newline after each sentence decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds] decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels] # Note that other metrics may not have a `use_aggregator` parameter # and thus will return a list, computing a metric for each sentence. result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True, use_aggregator=True) # Extract a few results result = {key: value * 100 for key, value in result.items()} # Add mean generated length prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions] result["gen_len"] = np.mean(prediction_lens) return {k: round(v, 4) for k, v in result.items()}<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Seq2SeqTrainer`:<jupyter_code>trainer = Seq2SeqTrainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
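After training, a quick qualitative check (an added sketch reusing `model`, `tokenizer`, `prefix` and the length limits defined above) is to generate a summary for one validation article:

```python
import torch

document = raw_datasets["validation"][0]["document"]
inputs = tokenizer(
    prefix + document,
    max_length=max_input_length,
    truncation=True,
    return_tensors="pt",
).to(model.device)

with torch.no_grad():
    summary_ids = model.generate(**inputs, max_length=max_target_length, num_beams=4)

print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```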
notebooks/examples/summarization.ipynb/0
{ "file_path": "notebooks/examples/summarization.ipynb", "repo_id": "notebooks", "token_count": 5127 }
149
<jupyter_start><jupyter_text>Fine-tuning for Video Classification with 🤗 TransformersThis notebook shows how to fine-tune a pre-trained Vision model for Video Classification on a custom dataset. The idea is to add a randomly initialized classification head on top of a pre-trained encoder and fine-tune the model altogether on a labeled dataset. DatasetThis notebook uses a subset of the [UCF-101 dataset](https://www.crcv.ucf.edu/data/UCF101.php). We'll be using a subset of the dataset to keep the runtime of the tutorial short. The subset was prepared using [this notebook](https://drive.google.com/file/d/1tTScjnyiKrBz84jKe1H_hPGGXffAZuxX/view?usp=sharing) following [this guide](https://www.tensorflow.org/tutorials/load_data/video). ModelWe'll fine-tune the [VideoMAE model](https://huggingface.co/docs/transformers/model_doc/videomae), which was pre-trained on the [Kinetics 400 dataset](https://www.deepmind.com/open-source/kinetics). You can find the other variants of VideoMAE available on 🤗 Hub [here](https://huggingface.co/models?search=videomae). You can also extend this notebook to use other video models such as [X-CLIP](https://huggingface.co/docs/transformers/model_doc/xcliptransformers.XCLIPVisionModel). **Note** that for models where there's no classification head already available you'll have to manually attach it (randomly initialized). But this is not the case for VideoMAE since we already have a [`VideoMAEForVideoClassification`](https://huggingface.co/docs/transformers/model_doc/xcliptransformers.XCLIPVisionModel) class. Data preprocessingThis notebook leverages [TorchVision's](https://pytorch.org/vision/stable/transforms.html) and [PyTorchVideo's](https://pytorchvideo.org/) transforms for applying data preprocessing transformations including data augmentation.---Depending on the model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those two parameters, then the rest of the notebook should run smoothly.<jupyter_code>model_ckpt = "MCG-NJU/videomae-base" # pre-trained model from which to fine-tune batch_size = 8 # batch size for training and evaluation<jupyter_output><empty_output><jupyter_text>Before we start, let's install the `pytorchvideo`, `transformers`, and `evaluate` libraries.<jupyter_code>!pip install pytorchvideo transformers evaluate -q<jupyter_output> |████████████████████████████████| 132 kB 4.9 MB/s  |████████████████████████████████| 5.5 MB 51.0 MB/s  |████████████████████████████████| 72 kB 1.6 MB/s  |████████████████████████████████| 50 kB 7.3 MB/s  |████████████████████████████████| 30.7 MB 1.2 MB/s  |████████████████████████████████| 42 kB 1.1 MB/s  |████████████████████████████████| 7.6 MB 48.7 MB/s  |████████████████████████████████| 163 kB 89.3 MB/s  |████████████████████████████████| 115 kB 85.3 MB/s  |████████████████████████████████| 441 kB 85.1 MB/s  |████████████████████████████████| 212 kB 53.0 MB/s  |████████████████████████████████| 95 kB 5.6 MB/s  |████████████████████████████████| 127 kB 88.8 MB/s  |████████████████████████████████| 115 kB 86.0 MB/s [?25h Building wheel for pytorchvideo (setup.py) ... [?25l[?25hdone Building wheel for fvcore (setup.py) ... 
[?25l[?25hdone Building wheel for io[...]<jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your token:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output>Login successful Your token has been saved to /root/.huggingface/token<jupyter_text>Then you need to install Git-LFS to upload your model checkpoints:<jupyter_code>!git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("video_classification_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a video classification task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) vision models on a Video Classification dataset.Given a video, the goal is to predict an appropriate class for it, like "archery". Loading the dataset Here we first download the subset archive and un-archive it.<jupyter_code>from huggingface_hub import hf_hub_download hf_dataset_identifier = "sayakpaul/ucf101-subset" filename = "UCF101_subset.tar.gz" file_path = hf_hub_download( repo_id=hf_dataset_identifier, filename=filename, repo_type="dataset" ) !tar xf {file_path}<jupyter_output><empty_output><jupyter_text>Now, let's investigate what is inside the archive.<jupyter_code>dataset_root_path = "UCF101_subset" !find {dataset_root_path} | head -5<jupyter_output>UCF101_subset UCF101_subset/val UCF101_subset/val/BasketballDunk UCF101_subset/val/BasketballDunk/v_BasketballDunk_g05_c05.avi UCF101_subset/val/BasketballDunk/UCF101<jupyter_text>Broadly, `dataset_root_path` is organized like so:```bashUCF101_subset/ train/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... val/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... test/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... 
...``` Let's now count the number of total videos we have.<jupyter_code>import pathlib dataset_root_path = pathlib.Path(dataset_root_path) video_count_train = len(list(dataset_root_path.glob("train/*/*.avi"))) video_count_val = len(list(dataset_root_path.glob("val/*/*.avi"))) video_count_test = len(list(dataset_root_path.glob("test/*/*.avi"))) video_total = video_count_train + video_count_val + video_count_test print(f"Total videos: {video_total}") all_video_file_paths = ( list(dataset_root_path.glob("train/*/*.avi")) + list(dataset_root_path.glob("val/*/*.avi")) + list(dataset_root_path.glob("test/*/*.avi")) ) all_video_file_paths[:5]<jupyter_output><empty_output><jupyter_text>The video paths, when `sorted`, appear like so:```py...'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c04.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c06.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c02.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c06.avi'... ```We notice that there are video clips belonging to the same group / scene where group is denoted by `g` in the video file paths. `v_ApplyEyeMakeup_g07_c04.avi` and `v_ApplyEyeMakeup_g07_c06.avi`, for example. For the validation and evaluation splits, we wouldn't want to have video clips from the same group / scene to prevent [data leakage](https://www.kaggle.com/code/alexisbcook/data-leakage). The subset that we're using in this tutorial takes this information into account. Next up, we derive the set of labels we have in the dataset. Let's also create two dictionaries that'll be helpful when initializing the model:* `label2id`: maps the class names to integers.* `id2label`: maps the integers to class names.<jupyter_code>class_labels = sorted({str(path).split("/")[2] for path in all_video_file_paths}) label2id = {label: i for i, label in enumerate(class_labels)} id2label = {i: label for label, i in label2id.items()} print(f"Unique classes: {list(label2id.keys())}.")<jupyter_output>Unique classes: ['ApplyEyeMakeup', 'ApplyLipstick', 'Archery', 'BabyCrawling', 'BalanceBeam', 'BandMarching', 'BaseballPitch', 'Basketball', 'BasketballDunk', 'BenchPress'].<jupyter_text>We've got 10 unique classes. For each class we have 30 videos in the training set. Loading the model In the next cell, we initialize a video classification model where the encoder is initialized with the pre-trained parameters and the classification head is randomly initialized. We also initialize the feature extractor associated to the model. This will come in handy during writing the preprocessing pipeline for our dataset.<jupyter_code>from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification image_processor = VideoMAEImageProcessor.from_pretrained(model_ckpt) model = VideoMAEForVideoClassification.from_pretrained( model_ckpt, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint )<jupyter_output>The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.<jupyter_text>The warning is telling us we are throwing away some weights (e.g. the weights and bias of the `classifier` layer) and randomly initializing some other (the weights and bias of a new `classifier` layer). 
This is expected in this case, because we are adding a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. **Note** that [this checkpoint](https://huggingface.co/MCG-NJU/videomae-base-finetuned-kinetics) leads to better performance on this task as the checkpoint was obtained fine-tuning on a similar downstream task having considerable domain overlap. You can check out [this checkpoint](https://huggingface.co/sayakpaul/videomae-base-finetuned-kinetics-finetuned-ucf101-subset) which was obtained by fine-tuning `MCG-NJU/videomae-base-finetuned-kinetics` and it obtains much better performance. Constructing the datasets for training For preprocessing the videos, we'll leverage the [PyTorch Video library](https://pytorchvideo.org/). We start by importing the dependencies we need.<jupyter_code>import pytorchvideo.data from pytorchvideo.transforms import ( ApplyTransformToKey, Normalize, RandomShortSideScale, RemoveKey, ShortSideScale, UniformTemporalSubsample, ) from torchvision.transforms import ( Compose, Lambda, RandomCrop, RandomHorizontalFlip, Resize, )<jupyter_output><empty_output><jupyter_text>For the training dataset transformations, we use a combination of uniform temporal subsampling, pixel normalization, random cropping, and random horizontal flipping. For the validation and evaluation dataset transformations, we keep the transformation chain the same except for random cropping and horizontal flipping. To learn more about the details of these transformations check out the [official documentation of PyTorch Video](https://pytorchvideo.org). We'll use the `image_processor` associated with the pre-trained model to obtain the following information:* Image mean and standard deviation with which the video frame pixels will be normalized.* Spatial resolution to which the video frames will be resized.<jupyter_code>import os mean = image_processor.image_mean std = image_processor.image_std if "shortest_edge" in image_processor.size: height = width = image_processor.size["shortest_edge"] else: height = image_processor.size["height"] width = image_processor.size["width"] resize_to = (height, width) num_frames_to_sample = model.config.num_frames sample_rate = 4 fps = 30 clip_duration = num_frames_to_sample * sample_rate / fps # Training dataset transformations. train_transform = Compose( [ ApplyTransformToKey( key="video", transform=Compose( [ UniformTemporalSubsample(num_frames_to_sample), Lambda(lambda x: x / 255.0), Normalize(mean, std), RandomShortSideScale(min_size=256, max_size=320), RandomCrop(resize_to), RandomHorizontalFlip(p=0.5), ] ), ), ] ) # Training dataset. train_dataset = pytorchvideo.data.Ucf101( data_path=os.path.join(dataset_root_path, "train"), clip_sampler=pytorchvideo.data.make_clip_sampler("random", clip_duration), decode_audio=False, transform=train_transform, ) # Validation and evaluation datasets' transformations. val_transform = Compose( [ ApplyTransformToKey( key="video", transform=Compose( [ UniformTemporalSubsample(num_frames_to_sample), Lambda(lambda x: x / 255.0), Normalize(mean, std), Resize(resize_to), ] ), ), ] ) # Validation and evaluation datasets. 
val_dataset = pytorchvideo.data.Ucf101( data_path=os.path.join(dataset_root_path, "val"), clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), decode_audio=False, transform=val_transform, ) test_dataset = pytorchvideo.data.Ucf101( data_path=os.path.join(dataset_root_path, "test"), clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), decode_audio=False, transform=val_transform, )<jupyter_output><empty_output><jupyter_text>**Note**: The above dataset pipelines are taken from the [official PyTorch Video example](https://pytorchvideo.org/docs/tutorial_classificationdataset). We're using the [`pytorchvideo.data.Ucf101()`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.htmlpytorchvideo.data.Ucf101) function because it's tailored for the UCF-101 dataset. Under the hood, it returns a [`pytorchvideo.data.labeled_video_dataset.LabeledVideoDataset`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.htmlpytorchvideo.data.LabeledVideoDataset) object. `LabeledVideoDataset` class is the base class for all things video in the PyTorch Video dataset. So, if you wanted to use a custom dataset not supported off-the-shelf by PyTorch Video, you can extend the `LabeledVideoDataset` class accordingly. Refer to the `data` API [documentation to](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html) learn more. Also, if your dataset follows a similar structure (as shown above), then using the `pytorchvideo.data.Ucf101()` should work just fine.<jupyter_code># We can access the `num_videos` argument to know the number of videos we have in the # dataset. train_dataset.num_videos, val_dataset.num_videos, test_dataset.num_videos<jupyter_output><empty_output><jupyter_text>Let's now take a preprocessed video from the dataset and investigate it.<jupyter_code>sample_video = next(iter(train_dataset)) sample_video.keys() def investigate_video(sample_video): """Utility to investigate the keys present in a single video sample.""" for k in sample_video: if k == "video": print(k, sample_video["video"].shape) else: print(k, sample_video[k]) print(f"Video label: {id2label[sample_video[k]]}") investigate_video(sample_video)<jupyter_output>video torch.Size([3, 16, 224, 224]) video_name v_Basketball_g01_c01.avi video_index 210 clip_index 0 aug_index 0 label 7 Video label: Basketball<jupyter_text>We can also visualize the preprocessed videos for easier debugging.<jupyter_code>import imageio import numpy as np from IPython.display import Image def unnormalize_img(img): """Un-normalizes the image pixels.""" img = (img * std) + mean img = (img * 255).astype("uint8") return img.clip(0, 255) def create_gif(video_tensor, filename="sample.gif"): """Prepares a GIF from a video tensor. The video tensor is expected to have the following shape: (num_frames, num_channels, height, width). 
""" frames = [] for video_frame in video_tensor: frame_unnormalized = unnormalize_img(video_frame.permute(1, 2, 0).numpy()) frames.append(frame_unnormalized) kargs = {"duration": 0.25} imageio.mimsave(filename, frames, "GIF", **kargs) return filename def display_gif(video_tensor, gif_name="sample.gif"): """Prepares and displays a GIF from a video tensor.""" video_tensor = video_tensor.permute(1, 0, 2, 3) gif_filename = create_gif(video_tensor, gif_name) return Image(filename=gif_filename) video_tensor = sample_video["video"] display_gif(video_tensor)<jupyter_output><empty_output><jupyter_text>Training the model We'll leverage [`Trainer`](https://huggingface.co/docs/transformers/main_classes/trainer) from 🤗 Transformers for training the model. To instantiate a `Trainer`, we will need to define the training configuration and an evaluation metric. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to configure the training. It requires an output folder name, which will be used to save the checkpoints of the model. It also helps sync all the information in the model repository on 🤗 Hub.Most of the training arguments are pretty self-explanatory, but one that is quite important here is `remove_unused_columns=False`. This one will drop any features not used by the model's call function. By default it's `True` because usually it's ideal to drop unused feature columns, making it easier to unpack inputs into the model's call function. But, in our case, we need the unused features ('video' in particular) in order to create `pixel_values` (which is a mandatory key our model expects in its inputs).<jupyter_code>from transformers import TrainingArguments, Trainer model_name = model_ckpt.split("/")[-1] new_model_name = f"{model_name}-finetuned-ucf101-subset" num_epochs = 4 args = TrainingArguments( new_model_name, remove_unused_columns=False, evaluation_strategy="epoch", save_strategy="epoch", learning_rate=5e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, warmup_ratio=0.1, logging_steps=10, load_best_model_at_end=True, metric_for_best_model="accuracy", push_to_hub=True, max_steps=(train_dataset.num_videos // batch_size) * num_epochs, )<jupyter_output><empty_output><jupyter_text>There's no need to define `max_steps` when instantiating `TrainingArguments`. Since the dataset returned by `pytorchvideo.data.Ucf101()` doesn't implement the `__len__()` method we had to specify `max_steps`. Next, we need to define a function for how to compute the metrics from the predictions, which will just use the `metric` we'll load now. The only preprocessing we have to do is to take the argmax of our predicted logits:<jupyter_code>import evaluate metric = evaluate.load("accuracy") # the compute_metrics function takes a Named Tuple as input: # predictions, which are the logits of the model as Numpy arrays, # and label_ids, which are the ground-truth labels as Numpy arrays. def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions.""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>**A note on evaluation**:In the [VideoMAE paper](https://arxiv.org/abs/2203.12602), the authors use the following evaluation strategy. 
They evaluate the model on several clips from test videos and apply different crops to those clips and report the aggregate score. However, in the interest of simplicity and brevity, we don't consider that in this tutorial. We also define a `collate_fn`, which will be used to batch examples together.Each batch consists of 2 keys, namely `pixel_values` and `labels`.<jupyter_code>import torch def collate_fn(examples): """The collation function to be used by `Trainer` to prepare data batches.""" # permute to (num_frames, num_channels, height, width) pixel_values = torch.stack( [example["video"].permute(1, 0, 2, 3) for example in examples] ) labels = torch.tensor([example["label"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels}<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=image_processor, compute_metrics=compute_metrics, data_collator=collate_fn, )<jupyter_output>Cloning https://huggingface.co/sayakpaul/videomae-base-finetuned-ucf101-subset into local empty directory. WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/videomae-base-finetuned-ucf101-subset into local empty directory.<jupyter_text>You might wonder why we pass along the `image_processor` as a tokenizer when we already preprocessed our data. This is only to make sure the feature extractor configuration file (stored as JSON) will also be uploaded to the repo on the hub. Now we can finetune our model by calling the `train` method:<jupyter_code>train_results = trainer.train()<jupyter_output>/usr/local/lib/python3.7/dist-packages/transformers/optimization.py:310: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning FutureWarning, ***** Running training ***** Num examples = 1184 Num Epochs = 9223372036854775807 Instantaneous batch size per device = 8 Total train batch size (w. parallel, distributed & accumulation) = 8 Gradient Accumulation steps = 1 Total optimization steps = 148 Number of trainable parameters = 86234890<jupyter_text>We can check with the `evaluate` method that our `Trainer` did reload the best model properly (if it was not the last one):<jupyter_code>trainer.evaluate(test_dataset) trainer.save_model() test_results = trainer.evaluate(test_dataset) trainer.log_metrics("test", test_results) trainer.save_metrics("test", test_results) trainer.save_state()<jupyter_output>Saving model checkpoint to videomae-base-finetuned-ucf101-subset Configuration saved in videomae-base-finetuned-ucf101-subset/config.json Model weights saved in videomae-base-finetuned-ucf101-subset/pytorch_model.bin Feature extractor saved in videomae-base-finetuned-ucf101-subset/preprocessor_config.json Saving model checkpoint to videomae-base-finetuned-ucf101-subset Configuration saved in videomae-base-finetuned-ucf101-subset/config.json Model weights saved in videomae-base-finetuned-ucf101-subset/pytorch_model.bin Feature extractor saved in videomae-base-finetuned-ucf101-subset/preprocessor_config.json Several commits (2) will be pushed upstream. WARNING:huggingface_hub.repository:Several commits (2) will be pushed upstream. The progress bars may be unreliable. 
WARNING:huggingface_hub.repository:The progress bars may be unreliable.<jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction (note that the Trainer will automatically create a model card as well as Tensorboard logs - see the "Training metrics" tab - amazing isn't it?):<jupyter_code>trainer.push_to_hub()<jupyter_output>Saving model checkpoint to videomae-base-finetuned-ucf101-subset Configuration saved in videomae-base-finetuned-ucf101-subset/config.json Model weights saved in videomae-base-finetuned-ucf101-subset/pytorch_model.bin Feature extractor saved in videomae-base-finetuned-ucf101-subset/preprocessor_config.json<jupyter_text>Now that our model is trained, let's use it to run inference on a video from `test_dataset`. Inference Let's load the trained model checkpoint and fetch a video from `test_dataset`.<jupyter_code>trained_model = VideoMAEForVideoClassification.from_pretrained(new_model_name) sample_test_video = next(iter(test_dataset)) investigate_video(sample_test_video)<jupyter_output>video torch.Size([3, 16, 224, 224]) video_name v_BasketballDunk_g12_c05.avi video_index 62 clip_index 0 aug_index 0 label 8 Video label: BasketballDunk<jupyter_text>We then prepare the video as a `torch.Tensor` and run inference.<jupyter_code>def run_inference(model, video): """Utility to run inference given a model and test video. The video is assumed to be preprocessed already. """ # (num_frames, num_channels, height, width) perumuted_sample_test_video = video.permute(1, 0, 2, 3) inputs = { "pixel_values": perumuted_sample_test_video.unsqueeze(0), "labels": torch.tensor( [sample_test_video["label"]] ), # this can be skipped if you don't have labels available. } device = torch.device("cuda" if torch.cuda.is_available() else "cpu") inputs = {k: v.to(device) for k, v in inputs.items()} model = model.to(device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits return logits logits = run_inference(trained_model, sample_test_video["video"])<jupyter_output><empty_output><jupyter_text>We can now check if the model got the prediction right.<jupyter_code>display_gif(sample_test_video["video"]) predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx])<jupyter_output>Predicted class: BasketballDunk
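<jupyter_text>If you want more than the top-1 prediction, you can also turn the logits into probabilities and inspect the top-k classes. The cell below is a small additional sketch that reuses the `logits` tensor and `model` from the cells above.<jupyter_code>probs = torch.softmax(logits, dim=-1)[0]
top5 = torch.topk(probs, k=5)
for score, idx in zip(top5.values.tolist(), top5.indices.tolist()):
    # `id2label` maps class indices back to the human-readable UCF-101 labels.
    print(f"{model.config.id2label[idx]}: {score:.4f}")<jupyter_output><empty_output>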
notebooks/examples/video_classification.ipynb/0
{ "file_path": "notebooks/examples/video_classification.ipynb", "repo_id": "notebooks", "token_count": 8881 }
150
import argparse import logging import os import random import sys from datasets import load_from_disk from sklearn.metrics import accuracy_score, precision_recall_fscore_support import torch from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer if __name__ == "__main__": parser = argparse.ArgumentParser() # hyperparameters sent by the client are passed as command-line arguments to the script. parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--train_batch_size", type=int, default=32) parser.add_argument("--eval_batch_size", type=int, default=64) parser.add_argument("--warmup_steps", type=int, default=500) parser.add_argument("--model_name", type=str) parser.add_argument("--learning_rate", type=float, default=5e-5) # Data, model, and output directories parser.add_argument("--checkpoints", type=str, default="/opt/ml/checkpoints/") parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"]) parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"]) args, _ = parser.parse_known_args() # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # load datasets train_dataset = load_from_disk(args.training_dir) test_dataset = load_from_disk(args.test_dir) logger.info(f" loaded train_dataset length is: {len(train_dataset)}") logger.info(f" loaded test_dataset length is: {len(test_dataset)}") # compute metrics function for binary classification def compute_metrics(pred): labels = pred.label_ids preds = pred.predictions.argmax(-1) precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="binary") acc = accuracy_score(labels, preds) return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall} # download model from model hub model = AutoModelForSequenceClassification.from_pretrained(args.model_name) tokenizer = AutoTokenizer.from_pretrained(args.model_name) # define training args training_args = TrainingArguments( output_dir=args.checkpoints, num_train_epochs=args.epochs, per_device_train_batch_size=args.train_batch_size, per_device_eval_batch_size=args.eval_batch_size, warmup_steps=args.warmup_steps, evaluation_strategy="epoch", logging_dir=f"{args.checkpoints}/logs", learning_rate=args.learning_rate, ) # create Trainer instance trainer = Trainer( model=model, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset, eval_dataset=test_dataset, tokenizer=tokenizer, ) # train model trainer.train() # evaluate model eval_result = trainer.evaluate(eval_dataset=test_dataset) # writes eval result to file which can be accessed later in s3 ouput with open(os.path.join(args.checkpoints, "eval_results.txt"), "w") as writer: print(f"***** Eval results *****") for key, value in sorted(eval_result.items()): writer.write(f"{key} = {value}\n") # Saves the model locally. In SageMaker, writing in /opt/ml/model sends it to S3 trainer.save_model(args.model_dir)
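# ---------------------------------------------------------------------------------------------
# Hedged sketch (not part of the original script): how a script like this is typically launched
# from a notebook or local machine with the SageMaker Python SDK. The role, instance type,
# container versions, S3 paths, and hyperparameter values below are assumptions -- adjust them
# to your own setup. It is left commented out because this file itself runs *inside* the
# training container, where launching a new job makes no sense.
#
# from sagemaker.huggingface import HuggingFace
#
# huggingface_estimator = HuggingFace(
#     entry_point="train.py",                      # this script
#     source_dir="./scripts",                      # assumed location of the script
#     instance_type="ml.p3.2xlarge",               # assumed GPU instance
#     instance_count=1,
#     role="<your-sagemaker-execution-role>",      # placeholder
#     transformers_version="4.26",                 # assumed DLC versions
#     pytorch_version="1.13",
#     py_version="py39",
#     hyperparameters={"epochs": 3, "train_batch_size": 32, "model_name": "distilbert-base-uncased"},
# )
#
# # channel names map to the SM_CHANNEL_TRAIN / SM_CHANNEL_TEST environment variables used above
# huggingface_estimator.fit({"train": "s3://<bucket>/train", "test": "s3://<bucket>/test"})
# ---------------------------------------------------------------------------------------------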
notebooks/sagemaker/06_sagemaker_metrics/scripts/train.py/0
{ "file_path": "notebooks/sagemaker/06_sagemaker_metrics/scripts/train.py", "repo_id": "notebooks", "token_count": 1415 }
151
# SageMaker push to hf.co/models example
notebooks/sagemaker/14_train_and_push_to_hub/README.md/0
{ "file_path": "notebooks/sagemaker/14_train_and_push_to_hub/README.md", "repo_id": "notebooks", "token_count": 12 }
152
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeepSpeed [DeepSpeed](https://www.deepspeed.ai/) is a library designed for speed and scale for distributed training of large models with billions of parameters. At its core is the Zero Redundancy Optimizer (ZeRO) that shards optimizer states (ZeRO-1), gradients (ZeRO-2), and parameters (ZeRO-3) across data parallel processes. This drastically reduces memory usage, allowing you to scale your training to billion parameter models. To unlock even more memory efficiency, ZeRO-Offload reduces GPU compute and memory by leveraging CPU resources during optimization. Both of these features are supported in 🤗 Accelerate, and you can use them with 🤗 PEFT. This guide will help you learn how to use our DeepSpeed [training script](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You'll configure the script to train a large model for conditional generation with ZeRO-3 and ZeRO-Offload. <Tip> 💡 To help you get started, check out our example training scripts for [causal language modeling](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py) and [conditional generation](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You can adapt these scripts for your own applications or even use them out of the box if your task is similar to the one in the scripts. </Tip> ## Configuration Start by running the following command to [create a DeepSpeed configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache. The configuration file is used to set the default options when you launch the training script. ```bash accelerate config --config_file ds_zero3_cpu.yaml ``` You'll be asked a few questions about your setup, and configure the following arguments. In this example, you'll use ZeRO-3 and ZeRO-Offload so make sure you pick those options. ```bash `zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning `gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them. `gradient_clipping`: Enable gradient clipping with value. `offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2. `offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3. `zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3. `zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3. `mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. 
``` An example [configuration file](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml) might look like the following. The most important thing to notice is that `zero_stage` is set to `3`, and `offload_optimizer_device` and `offload_param_device` are set to the `cpu`. ```yml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} machine_rank: 0 main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true use_cpu: false ``` ## The important parts Let's dive a little deeper into the script so you can see what's going on, and understand how it works. Within the [`main`](https://github.com/huggingface/peft/blob/2822398fbe896f25d4dac5e468624dc5fd65a51b/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py#L103) function, the script creates an [`~accelerate.Accelerator`] class to initialize all the necessary requirements for distributed training. <Tip> 💡 Feel free to change the model and dataset inside the `main` function. If your dataset format is different from the one in the script, you may also need to write your own preprocessing function. </Tip> The script also creates a configuration for the 🤗 PEFT method you're using, which in this case, is LoRA. The [`LoraConfig`] specifies the task type and important parameters such as the dimension of the low-rank matrices, the matrices scaling factor, and the dropout probability of the LoRA layers. If you want to use a different 🤗 PEFT method, make sure you replace `LoraConfig` with the appropriate [class](../package_reference/tuners). ```diff def main(): + accelerator = Accelerator() model_name_or_path = "facebook/bart-large" dataset_name = "twitter_complaints" + peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) ``` Throughout the script, you'll see the [`~accelerate.Accelerator.main_process_first`] and [`~accelerate.Accelerator.wait_for_everyone`] functions which help control and synchronize when processes are executed. 
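As a rough illustration (this snippet is a hedged sketch and is not copied from the script; the `dataset` and `preprocess_function` names are placeholders), these helpers are typically used so that only the main process does expensive work such as dataset preprocessing while the other processes wait for the cached result:

```py
with accelerator.main_process_first():
    # the main process runs the tokenization/mapping first; the other processes reuse the cache
    processed_datasets = dataset.map(preprocess_function, batched=True, remove_columns=dataset["train"].column_names)

accelerator.wait_for_everyone()  # make sure every process sees the processed dataset before moving on
```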
The [`get_peft_model`] function takes a base model and the [`peft_config`] you prepared earlier to create a [`PeftModel`]: ```diff model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) + model = get_peft_model(model, peft_config) ``` Pass all the relevant training objects to 🤗 Accelerate's [`~accelerate.Accelerator.prepare`] which makes sure everything is ready for training: ```py model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler ) ``` The next bit of code checks whether the DeepSpeed plugin is used in the `Accelerator`, and if the plugin exists, then the `Accelerator` uses ZeRO-3 as specified in the configuration file: ```py is_ds_zero_3 = False if getattr(accelerator.state, "deepspeed_plugin", None): is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3 ``` Inside the training loop, the usual `loss.backward()` is replaced by 🤗 Accelerate's [`~accelerate.Accelerator.backward`] which uses the correct `backward()` method based on your configuration: ```diff for epoch in range(num_epochs): with TorchTracemalloc() as tracemalloc: model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` That is all! The rest of the script handles the training loop, evaluation, and even pushes it to the Hub for you. ## Train Run the following command to launch the training script. Earlier, you saved the configuration file to `ds_zero3_cpu.yaml`, so you'll need to pass the path to the launcher with the `--config_file` argument like this: ```bash accelerate launch --config_file ds_zero3_cpu.yaml examples/peft_lora_seq2seq_accelerate_ds_zero3_offload.py ``` You'll see some output logs that track memory usage during training, and once it's completed, the script returns the accuracy and compares the predictions to the labels: ```bash GPU Memory before entering the train : 1916 GPU Memory consumed at the end of the train (end-begin): 66 GPU Peak Memory consumed during the train (max-begin): 7488 GPU Total Peak Memory consumed during the train (max): 9404 CPU Memory before entering the train : 19411 CPU Memory consumed at the end of the train (end-begin): 0 CPU Peak Memory consumed during the train (max-begin): 0 CPU Total Peak Memory consumed during the train (max): 19411 epoch=4: train_ppl=tensor(1.0705, device='cuda:0') train_epoch_loss=tensor(0.0681, device='cuda:0') 100%|████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:27<00:00, 3.92s/it] GPU Memory before entering the eval : 1982 GPU Memory consumed at the end of the eval (end-begin): -66 GPU Peak Memory consumed during the eval (max-begin): 672 GPU Total Peak Memory consumed during the eval (max): 2654 CPU Memory before entering the eval : 19411 CPU Memory consumed at the end of the eval (end-begin): 0 CPU Peak Memory consumed during the eval (max-begin): 0 CPU Total Peak Memory consumed during the eval (max): 19411 accuracy=100.0 eval_preds[:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint'] dataset['train'][label_column][:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 
'complaint', 'no complaint'] ```
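Once training has finished, the LoRA adapter that was saved (or pushed to the Hub) can be loaded for inference without DeepSpeed. The snippet below is a minimal sketch: the adapter path is a placeholder for wherever your run stored the weights, the input sentence is just an example tweet, and the exact prompt format should match whatever preprocessing the training script used.

```py
from transformers import AutoTokenizer
from peft import AutoPeftModelForSeq2SeqLM

model = AutoPeftModelForSeq2SeqLM.from_pretrained("path-or-repo-of-your-lora-adapter")  # placeholder
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")

inputs = tokenizer("my package arrived broken and nobody answers the support line", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```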
peft/docs/source/accelerate/deepspeed-zero3-offload.md/0
{ "file_path": "peft/docs/source/accelerate/deepspeed-zero3-offload.md", "repo_id": "peft", "token_count": 2997 }
153
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# AutoPeftModels

The `AutoPeftModel` classes load the appropriate PEFT model for the task type by automatically inferring it from the configuration file. They are designed to quickly and easily load a PEFT model in a single line of code without having to worry about which exact model class you need or manually loading a [`PeftConfig`].

## AutoPeftModel

[[autodoc]] auto.AutoPeftModel
    - from_pretrained

## AutoPeftModelForCausalLM

[[autodoc]] auto.AutoPeftModelForCausalLM

## AutoPeftModelForSeq2SeqLM

[[autodoc]] auto.AutoPeftModelForSeq2SeqLM

## AutoPeftModelForSequenceClassification

[[autodoc]] auto.AutoPeftModelForSequenceClassification

## AutoPeftModelForTokenClassification

[[autodoc]] auto.AutoPeftModelForTokenClassification

## AutoPeftModelForQuestionAnswering

[[autodoc]] auto.AutoPeftModelForQuestionAnswering

## AutoPeftModelForFeatureExtraction

[[autodoc]] auto.AutoPeftModelForFeatureExtraction
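All of these classes share the same loading pattern. As a minimal sketch (using a publicly available LoRA adapter for `facebook/opt-350m`):

```py
from peft import AutoPeftModelForCausalLM

# the base model and task type are inferred from the adapter's configuration file
model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```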
peft/docs/source/package_reference/auto_class.md/0
{ "file_path": "peft/docs/source/package_reference/auto_class.md", "repo_id": "peft", "token_count": 470 }
154
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quicktour PEFT offers parameter-efficient methods for finetuning large pretrained models. The traditional paradigm is to finetune all of a model's parameters for each downstream task, but this is becoming exceedingly costly and impractical because of the enormous number of parameters in models today. Instead, it is more efficient to train a smaller number of prompt parameters or use a reparametrization method like low-rank adaptation (LoRA) to reduce the number of trainable parameters. This quicktour will show you PEFT's main features and how you can train or run inference on large models that would typically be inaccessible on consumer devices. ## Train Each PEFT method is defined by a [`PeftConfig`] class that stores all the important parameters for building a [`PeftModel`]. For example, to train with LoRA, load and create a [`LoraConfig`] class and specify the following parameters: - `task_type`: the task to train for (sequence-to-sequence language modeling in this case) - `inference_mode`: whether you're using the model for inference or not - `r`: the dimension of the low-rank matrices - `lora_alpha`: the scaling factor for the low-rank matrices - `lora_dropout`: the dropout probability of the LoRA layers ```python from peft import LoraConfig, TaskType peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1) ``` <Tip> See the [`LoraConfig`] reference for more details about other parameters you can adjust, such as the modules to target or the bias type. </Tip> Once the [`LoraConfig`] is setup, create a [`PeftModel`] with the [`get_peft_model`] function. It takes a base model - which you can load from the Transformers library - and the [`LoraConfig`] containing the parameters for how to configure a model for training with LoRA. Load the base model you want to finetune. ```python from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large") ``` Wrap the base model and `peft_config` with the [`get_peft_model`] function to create a [`PeftModel`]. To get a sense of the number of trainable parameters in your model, use the [`print_trainable_parameters`] method. ```python from peft import get_peft_model model = get_peft_model(model, peft_config) model.print_trainable_parameters() "output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282" ``` Out of [bigscience/mt0-large's](https://huggingface.co/bigscience/mt0-large) 1.2B parameters, you're only training 0.19% of them! That is it 🎉! Now you can train the model with the Transformers [`~transformers.Trainer`], Accelerate, or any custom PyTorch training loop. 
For example, to train with the [`~transformers.Trainer`] class, setup a [`~transformers.TrainingArguments`] class with some training hyperparameters. ```py training_args = TrainingArguments( output_dir="your-name/bigscience/mt0-large-lora", learning_rate=1e-3, per_device_train_batch_size=32, per_device_eval_batch_size=32, num_train_epochs=2, weight_decay=0.01, evaluation_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, ) ``` Pass the model, training arguments, dataset, tokenizer, and any other necessary component to the [`~transformers.Trainer`], and call [`~transformers.Trainer.train`] to start training. ```py trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["test"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) trainer.train() ``` ### Save model After your model is finished training, you can save your model to a directory using the [`~transformers.PreTrainedModel.save_pretrained`] function. ```py model.save_pretrained("output_dir") ``` You can also save your model to the Hub (make sure you're logged in to your Hugging Face account first) with the [`~transformers.PreTrainedModel.push_to_hub`] function. ```python from huggingface_hub import notebook_login notebook_login() model.push_to_hub("your-name/bigscience/mt0-large-lora") ``` Both methods only save the extra PEFT weights that were trained, meaning it is super efficient to store, transfer, and load. For example, this [facebook/opt-350m](https://huggingface.co/ybelkada/opt-350m-lora) model trained with LoRA only contains two files: `adapter_config.json` and `adapter_model.safetensors`. The `adapter_model.safetensors` file is just 6.3MB! <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">The adapter weights for a opt-350m model stored on the Hub are only ~6MB compared to the full size of the model weights, which can be ~700MB.</figcaption> </div> ## Inference <Tip> Take a look at the [AutoPeftModel](package_reference/auto_class) API reference for a complete list of available `AutoPeftModel` classes. </Tip> Easily load any PEFT-trained model for inference with the [`AutoPeftModel`] class and the [`~transformers.PreTrainedModel.from_pretrained`] method: ```py from peft import AutoPeftModelForCausalLM from transformers import AutoTokenizer import torch model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model = model.to("cuda") model.eval() inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt") outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]) "Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla." ``` For other tasks that aren't explicitly supported with an `AutoPeftModelFor` class - such as automatic speech recognition - you can still use the base [`AutoPeftModel`] class to load a model for the task. 
```py from peft import AutoPeftModel model = AutoPeftModel.from_pretrained("smangrul/openai-whisper-large-v2-LORA-colab") ``` ## Next steps Now that you've seen how to train a model with one of the PEFT methods, we encourage you to try out some of the other methods like prompt tuning. The steps are very similar to the ones shown in the quicktour: 1. prepare a [`PeftConfig`] for a PEFT method 2. use the [`get_peft_model`] method to create a [`PeftModel`] from the configuration and base model Then you can train it however you like! To load a PEFT model for inference, you can use the [`AutoPeftModel`] class. Feel free to also take a look at the task guides if you're interested in training a model with another PEFT method for a specific task such as semantic segmentation, multilingual automatic speech recognition, DreamBooth, token classification, and more.
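For instance, a prompt tuning setup follows the same two steps; only the configuration class changes. The snippet below is an illustrative sketch (the base model and the number of virtual tokens are arbitrary choices, not recommendations):

```py
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, TaskType, get_peft_model

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
peft_config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=8)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```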
peft/docs/source/quicktour.md/0
{ "file_path": "peft/docs/source/quicktour.md", "repo_id": "peft", "token_count": 2384 }
155
import os import torch import torch.nn as nn import transformers from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from peft import LoraConfig, get_peft_model os.environ["CUDA_VISIBLE_DEVICES"] = "0" # -*- coding: utf-8 -*- """Finetune-opt-bnb-peft.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1jCkpikz0J2o20FBQmYmAGdiKmJGOMo-o ## Fine-tune large models using 🤗 `peft` adapters, `transformers` & `bitsandbytes` In this tutorial we will cover how we can fine-tune large language models using the very recent `peft` library and `bitsandbytes` for loading large models in 8-bit. The fine-tuning method will rely on a recent method called "Low Rank Adapters" (LoRA), instead of fine-tuning the entire model you just have to fine-tune these adapters and load them properly inside the model. After fine-tuning the model you can also share your adapters on the 🤗 Hub and load them very easily. Let's get started! ### Install requirements First, run the cells below to install the requirements: """ """### Model loading Here let's load the `opt-6.7b` model, its weights in half-precision (float16) are about 13GB on the Hub! If we load them in 8-bit we would require around 7GB of memory instead. """ free_in_GB = int(torch.cuda.mem_get_info()[0] / 1024**3) max_memory = f"{free_in_GB-2}GB" n_gpus = torch.cuda.device_count() max_memory = {i: max_memory for i in range(n_gpus)} model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", max_memory=max_memory, quantization_config=BitsAndBytesConfig( load_in_4bit=True, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ), torch_dtype=torch.float16, ) tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") """### Post-processing on the model Finally, we need to apply some post-processing on the 8-bit model to enable training, let's freeze all our layers, and cast the layer-norm in `float32` for stability. We also cast the output of the last layer in `float32` for the same reasons. """ print(model) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # model.gradient_checkpointing_enable() # reduce number of stored activations # model.model.decoder.project_in = lambda x: x.requires_grad_(True) class CastOutputToFloat(nn.Sequential): def forward(self, x): return super().forward(x).to(torch.float32) model.lm_head = CastOutputToFloat(model.lm_head) """### Apply LoRA Here comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`. """ def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) config = LoraConfig( r=64, lora_alpha=32, target_modules=["q_proj", "v_proj", "out_proj", "fc1", "fc2"], lora_dropout=0.01, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) print_trainable_parameters(model) # Verifying the datatypes. dtypes = {} for _, p in model.named_parameters(): dtype = p.dtype if dtype not in dtypes: dtypes[dtype] = 0 dtypes[dtype] += p.numel() total = 0 for k, v in dtypes.items(): total += v for k, v in dtypes.items(): print(k, v, v / total) """### Training""" data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=10, max_steps=20, learning_rate=3e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train() # from huggingface_hub import notebook_login # notebook_login() # model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True) """## Load adapters from the Hub You can also directly load adapters from the Hub using the commands below: """ # import torch # from peft import PeftModel, PeftConfig # from transformers import AutoModelForCausalLM, AutoTokenizer # # peft_model_id = "ybelkada/opt-6.7b-lora" # config = PeftConfig.from_pretrained(peft_model_id) # model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map='auto') # tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # ## Load the Lora model # model = PeftModel.from_pretrained(model, peft_model_id) # # """## Inference # # You can then directly use the trained model or the model that you have loaded from the 🤗 Hub for inference as you would do it usually in `transformers`. # """ # batch = tokenizer("Two things are infinite: ", return_tensors="pt") model.config.use_cache = False # silence the warnings. Please re-enable for inference! model.eval() with torch.cuda.amp.autocast(): output_tokens = model.generate(**batch, max_new_tokens=50) print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True)) # model.save('./test.pt') # """As you can see by fine-tuning for few steps we have almost recovered the quote from Albert Einstein that is present in the [training data](https://huggingface.co/datasets/Abirate/english_quotes)."""
peft/examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py/0
{ "file_path": "peft/examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py", "repo_id": "peft", "token_count": 2309 }
156
<jupyter_start><jupyter_code>import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional import psutil import json import torch import torch.nn.functional as F import torch.utils.checkpoint from torch.utils.data import Dataset import datasets import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers import DDPMScheduler, PNDMScheduler, StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig, CLIPFeatureExtractor from peft import PeftModel, LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) MODEL_NAME = "CompVis/stable-diffusion-v1-4" # "stabilityai/stable-diffusion-2-1-base" INSTANCE_PROMPT = "a photo of sks dog" base_path = "/home/sourab/temp/" def get_lora_sd_pipeline( ckpt_dir, base_model_name_or_path=None, dtype=torch.float16, device="cuda", adapter_name="default" ): unet_sub_dir = os.path.join(ckpt_dir, "unet") text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder") if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None: config = LoraConfig.from_pretrained(text_encoder_sub_dir) base_model_name_or_path = config.base_model_name_or_path if base_model_name_or_path is None: raise ValueError("Please specify the base model name or path") pipe = StableDiffusionPipeline.from_pretrained( base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False ).to(device) pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) if os.path.exists(text_encoder_sub_dir): pipe.text_encoder = PeftModel.from_pretrained( pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name ) if dtype in (torch.float16, torch.bfloat16): pipe.unet.half() pipe.text_encoder.half() pipe.to(device) return pipe def load_adapter(pipe, ckpt_dir, adapter_name): unet_sub_dir = os.path.join(ckpt_dir, "unet") text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder") pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name) if os.path.exists(text_encoder_sub_dir): pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name) def set_adapter(pipe, adapter_name): pipe.unet.set_adapter(adapter_name) if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.set_adapter(adapter_name) def merging_lora_with_base(pipe, ckpt_dir, adapter_name="default"): unet_sub_dir = os.path.join(ckpt_dir, "unet") text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder") if isinstance(pipe.unet, PeftModel): pipe.unet.set_adapter(adapter_name) else: pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) pipe.unet = pipe.unet.merge_and_unload() if os.path.exists(text_encoder_sub_dir): if isinstance(pipe.text_encoder, PeftModel): 
pipe.text_encoder.set_adapter(adapter_name) else: pipe.text_encoder = PeftModel.from_pretrained( pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name ) pipe.text_encoder = pipe.text_encoder.merge_and_unload() return pipe def create_weighted_lora_adapter(pipe, adapters, weights, adapter_name="default"): pipe.unet.add_weighted_adapter(adapters, weights, adapter_name) if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.add_weighted_adapter(adapters, weights, adapter_name) return pipe %%time pipe = get_lora_sd_pipeline(os.path.join(base_path, "dog_dreambooth_updated"), adapter_name="dog") %%time load_adapter(pipe, os.path.join(base_path, "toy_dreambooth"), adapter_name="toy") pipe = create_weighted_lora_adapter(pipe, ["toy", "dog"], [1.0, 1.05], adapter_name="toy_dog") %%time set_adapter(pipe, adapter_name="dog") prompt = "sks dog playing fetch in the park" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image %%time set_adapter(pipe, adapter_name="toy") prompt = "narendra modi rendered in the style of <1>" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image set_adapter(pipe, adapter_name="dog") prompt = "sks dog in a big red bucket" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image set_adapter(pipe, adapter_name="toy") prompt = "superman rendered in the style of <1>, close up potrait" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image set_adapter(pipe, adapter_name="toy_dog") prompt = "sks dog rendered in the style of <1>, close up potrait, 4K HD" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image<jupyter_output><empty_output>
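<jupyter_text>The helper `merging_lora_with_base` defined above is not exercised in this notebook. The cell below is an optional sketch showing how you could merge an adapter into the base weights to get a plain Stable Diffusion pipeline without PEFT wrappers, which can be convenient for deployment. Merging modifies the pipeline in place, so do it on a freshly loaded pipeline if you still want to switch between adapters; the prompt is just an example.<jupyter_code>merged_pipe = get_lora_sd_pipeline(os.path.join(base_path, "dog_dreambooth_updated"), adapter_name="dog")
merged_pipe = merging_lora_with_base(merged_pipe, os.path.join(base_path, "dog_dreambooth_updated"), adapter_name="dog")

prompt = "sks dog sleeping on a couch"
negative_prompt = "low quality, blurry, unfinished"
image = merged_pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image<jupyter_output><empty_output>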
peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 2282 }
157
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import importlib.metadata as importlib_metadata from functools import lru_cache import packaging.version def is_bnb_available() -> bool: return importlib.util.find_spec("bitsandbytes") is not None def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") def is_auto_gptq_available(): if importlib.util.find_spec("auto_gptq") is not None: AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: return True else: raise ImportError( f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" ) def is_optimum_available() -> bool: return importlib.util.find_spec("optimum") is not None @lru_cache() def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False
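# Example usage (illustrative sketch, not part of the module's public API):
#
#     from peft.import_utils import is_bnb_4bit_available
#
#     if is_bnb_4bit_available():
#         import bitsandbytes as bnb
#         linear_cls = bnb.nn.Linear4bit  # guaranteed to exist when the check above returns True
#     else:
#         import torch
#         linear_cls = torch.nn.Linear  # fall back to a regular linear layer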
peft/src/peft/import_utils.py/0
{ "file_path": "peft/src/peft/import_utils.py", "repo_id": "peft", "token_count": 851 }
158
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn def llama_rotate_half(x: torch.Tensor) -> torch.Tensor: """ Rotate half the hidden dims of the input. This function was duplicated verbatim from: https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126 This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other functions were also adapted from the transformers implementation but were modified. """ x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def llama_apply_rotary_pos_emb(q, cos, sin, position_ids): """ Apply rotary position embedding to query states in the Llama model. This function was adapted from: https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133 It was modified to remove unnecessary processing of key states. The method is compatible with transformers <= 4.34.2 and also with the latest version (>=4.35). """ # In previous transformers version cos/sin cached had a shape of 4D if len(cos.shape) == 4: gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) # In the new version, it is 2D so we fall back to the new implementation # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226 else: cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) q_embed = (q * cos) + (llama_rotate_half(q) * sin) return q_embed def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor: """ Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the original LlamaModel in the transformers library does not return them. 
See the related discussion in the PR: https://github.com/huggingface/peft/pull/268 """ hidden_states = kwargs.get("hidden_states") position_ids = kwargs.get("position_ids") past_key_value = kwargs.get("past_key_value") bsz, q_len, _ = hidden_states.size() query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2) value_states = model.v_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2) seq_len = q_len if past_key_value is not None: if isinstance(past_key_value, tuple): # for transformers <= 4.35 seq_len += past_key_value[0].shape[-2] else: # since transformers 4.36, this is a DynamicCache instance seq_len += past_key_value.get_seq_length(model.layer_idx) cos, sin = model.rotary_emb(value_states, seq_len=seq_len) return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids) def is_adaption_prompt_trainable(params: str) -> bool: """Return True if module is trainable under adaption prompt fine-tuning.""" return params.split(".")[-1].startswith("adaption_")
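A tiny worked example of `llama_rotate_half` (the import path simply mirrors this file's location; the numbers are arbitrary):

```python
import torch

from peft.tuners.adaption_prompt.utils import llama_rotate_half

x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])  # halves along the last dim: [1, 2] and [3, 4]
print(llama_rotate_half(x))               # tensor([[-3., -4.,  1.,  2.]])
```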
peft/src/peft/tuners/adaption_prompt/utils.py/0
{ "file_path": "peft/src/peft/tuners/adaption_prompt/utils.py", "repo_id": "peft", "token_count": 1532 }
159
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import List, Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class LoftQConfig: """ This is the sub-configuration class to store the configuration of a [`LoraModel`]. Args: bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}. bits (`int`): Quantization bits for LoftQ. iter (`int`): Alternating iterations for LoftQ. fake (`bool`): True: use fp16/fp32; used for first time to save weights. False: use bitsandbytes 4bit linear models. weights can't be saved. Recommend to set to True, save the weights and load the saved weights in 4 bits. """ loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"}) loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"}) @dataclass class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension (the "rank"). target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. lora_alpha (`int`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for LoRA. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. use_rslora (`bool`): When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a> which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better. Otherwise, it will use the original default value of `lora_alpha/r`. 
modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. init_lora_weights (`bool` | `Literal["gaussian", "loftq"]`): How to initialize the weights of the adapter layers. Passing True (default) results in the default initialization from the reference implementation from Microsoft. Passing 'gaussian' results in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization to False leads to completely random initialization and is discouraged. Pass `'loftq'` to use LoftQ initialization. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None`. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. megatron_config (`Optional[dict]`): The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron. The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron. megatron_core (`Optional[str]`): The core module from Megatron to use, defaults to `"megatron.core"`. loftq_config (`Optional[LoftQConfig]`): The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a quantized model in this case, as LoftQ will quantize the model itself. """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with LoRA." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you shoud specify the target modules manually." ), }, ) lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: Literal["none", "all", "lora_only"] = field( default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"} ) use_rslora: bool = field( default=False, metadata={ "help": ( "When set to True, uses Rank-Stabilized LoRA doi.org/10.48550/arXiv.2312.03732" " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it" " was proven to work better. 
Otherwise, it will use the original default" " value of `lora_alpha/r`." ) }, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: bool | Literal["gaussian", "loftq"] = field( default=True, metadata={ "help": ( "How to initialize the weights of the LoRA layers. Passing True (default) results in the default " "initialization from the reference implementation from Microsoft. Passing 'gaussian' results " "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization " "to False leads to completely random initialization and is discouraged." "Pass `'loftq'` to use LoftQ initialization" ), }, ) layers_to_transform: Optional[Union[List[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " "This only works when target_modules is a list of str." }, ) layers_pattern: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." "This only works when target_modules is a list of str." }, ) rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) megatron_config: Optional[dict] = field( default=None, metadata={ "help": ( "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer." "You can get it like this, `core_transformer_config_from_args(get_args())`, " "these two functions being from Megatron." "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and " "RowParallelLinear layers of megatron." "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` " "functions, because TransformerConfig may not necessarily be serialized." "But when using megatron, we can use `get_peft_model_state_dict` function and " "megatron's framework, they can also save and load models and configurations." ) }, ) megatron_core: Optional[str] = field( default="megatron.core", metadata={ "help": ( "The core module from Megatron, it is used to create LoRA's parallel linear layer. " "It only needs to be passed in when you need to use your own modified megatron core module. " "Otherwise, it will use the default value `megatron.core`. " ) }, ) # dict type is used when loading config.json loftq_config: Union[LoftQConfig, dict] = field( default_factory=dict, metadata={ "help": ( "The configuration of LoftQ. 
If this is passed, then LoftQ will be used to quantize the backbone " "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case." ) }, ) def __post_init__(self): self.peft_type = PeftType.LORA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") # handle init_lora_weights and loftq_config if self.init_lora_weights == "loftq": import importlib if not importlib.util.find_spec("scipy"): raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") if self.loftq_config is None: raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.") # convert loftq_config to dict if self.loftq_config and not isinstance(self.loftq_config, dict): self.loftq_config = vars(self.loftq_config)
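A minimal configuration sketch built from the fields defined above; the target module names and the layer name in `rank_pattern` are illustrative and depend on the base model:

```python
from peft import LoraConfig

config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],                   # exact or suffix match, per the docstring
    rank_pattern={"model.layers.0.self_attn.q_proj": 16},  # per-module rank override
)
```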
peft/src/peft/tuners/lora/config.py/0
{ "file_path": "peft/src/peft/tuners/lora/config.py", "repo_id": "peft", "token_count": 5286 }
160
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Union

from peft.config import PromptLearningConfig
from peft.utils import PeftType


class PromptEncoderReparameterizationType(str, enum.Enum):
    MLP = "MLP"
    LSTM = "LSTM"


@dataclass
class PromptEncoderConfig(PromptLearningConfig):
    """
    This is the configuration class to store the configuration of a [`PromptEncoder`].

    Args:
        encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]):
            The type of reparameterization to use.
        encoder_hidden_size (`int`): The hidden size of the prompt encoder.
        encoder_num_layers (`int`): The number of layers of the prompt encoder.
        encoder_dropout (`float`): The dropout probability of the prompt encoder.
    """

    encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(
        default=PromptEncoderReparameterizationType.MLP,
        metadata={"help": "How to reparameterize the prompt encoder"},
    )
    encoder_hidden_size: int = field(
        default=None,
        metadata={"help": "The hidden size of the prompt encoder"},
    )
    encoder_num_layers: int = field(
        default=2,
        metadata={"help": "The number of layers of the prompt encoder"},
    )
    encoder_dropout: float = field(
        default=0.0,
        metadata={"help": "The dropout of the prompt encoder"},
    )

    def __post_init__(self):
        self.peft_type = PeftType.P_TUNING
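A minimal P-tuning configuration sketch; note that `task_type` and `num_virtual_tokens` come from the `PromptLearningConfig`/`PeftConfig` base classes rather than this file, and the sizes below are illustrative:

```python
from peft import PromptEncoderConfig

peft_config = PromptEncoderConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=20,
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=128,
)
```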
peft/src/peft/tuners/p_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/p_tuning/config.py", "repo_id": "peft", "token_count": 739 }
161
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py # Reference paper: https://arxiv.org/abs/2310.08659 import logging from typing import Union import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available if is_bnb_available(): import bitsandbytes as bnb class NFQuantizer: def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs): super().__init__(*args, **kwargs) self.num_bits = num_bits self.device = device self.method = method self.block_size = block_size if self.method == "normal": self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) elif self.method == "uniform": self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) else: raise NotImplementedError("Other quantization methods not supported yet.") @staticmethod def create_uniform_map(symmetric=False, num_bits=4): if symmetric: # print("symmetric uniform quantization") negative = torch.linspace(-1, 0, 2 ** (num_bits - 1)) positive = torch.linspace(0, 1, 2 ** (num_bits - 1)) table = torch.cat([negative, positive[1:]]) else: # print("asymmetric uniform quantization") table = torch.linspace(-1, 1, 2**num_bits) return table @staticmethod def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2): try: from scipy.stats import norm except ImportError: raise ImportError("The required package 'scipy' is not installed. 
Please install it to continue.") variations = 2**num_bits if symmetric: v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist() values = [] for index in range(len(v) - 1): values.append(0.5 * v[index] + 0.5 * v[index + 1]) v = values else: # one more positive value, this is an asymmetric type v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist() v2 = [0] v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist() v = v1 + v2 + v3 values = torch.Tensor(v) values = values.sort().values values /= values.max() return values def quantize_tensor(self, weight): max_abs = torch.abs(weight).max() weight_normed = weight / max_abs weight_normed_expanded = weight_normed.unsqueeze(-1) # Reshape L to have the same number of dimensions as X_expanded L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1) # Calculate the absolute difference between X_expanded and L_reshaped abs_diff = torch.abs(weight_normed_expanded - L_reshaped) # Find the index of the minimum absolute difference for each element qweight = torch.argmin(abs_diff, dim=-1) return qweight, max_abs def dequantize_tensor(self, qweight, max_abs): qweight_flatten = qweight.flatten() weight_normed = self.norm_lookup_table[qweight_flatten] weight = weight_normed * max_abs weight = weight.reshape(qweight.shape) return weight def quantize_block(self, weight): if len(weight.shape) != 2: raise ValueError(f"Only support 2D matrix, but your input has {len(weight.shape)} dimensions.") if weight.shape[0] * weight.shape[1] % self.block_size != 0: raise ValueError( f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) " f"is not dividable by block size {self.block_size}." ) M, N = weight.shape device = weight.device # Quantization weight_flatten = weight.flatten() # (M*N, ) weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B if self.method == "normal": weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1) elif self.method == "uniform": weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1) else: raise NotImplementedError("Method not supported yet.") weight_max = weight_max.unsqueeze(-1) weight_divabs = weight_block / weight_max # (L, B) weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1) L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K) abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K) qweight = torch.argmin(abs_diff, dim=-1) # (L, B) # Pack multiple k-bit into uint8 qweight = qweight.reshape(-1, 8 // self.num_bits) qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device) # data format example: # [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO for i in range(8 // self.num_bits): qweight[:, i] = qweight[:, i] << i * self.num_bits qweight_pack[:, 0] |= qweight[:, i] return qweight_pack, weight_max, weight.shape def dequantize_block(self, qweight, weight_max, weight_shape): # unpack weight device = qweight.device weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device) for i in range(8 // self.num_bits): lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits # get the most right 2 bits lookup_table_idx = lookup_table_idx.to(torch.long) weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze() qweight = qweight >> self.num_bits # right shift 2 bits of the original data weight_block = weight.reshape(-1, self.block_size) weight = weight_block * weight_max weight = weight.reshape(weight_shape) return weight def 
_low_rank_decomposition(weight, reduced_rank=32): """ :param weight: The matrix to decompose, of shape (H, W) :param reduced_rank: the final rank :return: """ matrix_dimension = len(weight.size()) if matrix_dimension != 2: raise ValueError(f"Only support 2D matrix, but your input has {matrix_dimension} dimensions.") # Use SVD to decompose a matrix, default full_matrices is False to save parameters U, S, Vh = torch.linalg.svd(weight, full_matrices=False) L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank])) R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank} @torch.no_grad() def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1): if num_bits not in [2, 4, 8]: raise ValueError("Only support 2, 4, 8 bits quantization") if num_iter <= 0: raise ValueError("Number of iterations must be greater than 0") out_feature, in_feature = weight.size() device = weight.device dtype = weight.dtype logging.info( f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} " f"| Num Iter: {num_iter} | Num Bits: {num_bits}" ) if not is_bnb_4bit_available() or num_bits in [2, 8]: quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64) compute_device = device else: compute_device = "cuda" weight = weight.to(device=compute_device, dtype=torch.float32) res = weight.clone() for i in range(num_iter): torch.cuda.empty_cache() # Quantization if num_bits == 4 and is_bnb_4bit_available(): qweight = bnb.nn.Params4bit( res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4" ).to(compute_device) dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state) else: quantized_weight, max_abs, shape = quantizer.quantize_block(res) dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape) res = weight - dequantized_weight # Decompose the residual by SVD output = _low_rank_decomposition(res, reduced_rank=reduced_rank) L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"] res = weight - torch.mm(L, R) lora_A, lora_B = R, L return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B
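An illustrative call to the `loftq_init` function defined above (shapes and settings are arbitrary; the normal-map quantizer needs `scipy`, and the 4-bit path may require bitsandbytes plus a CUDA device):

```python
import torch

weight = torch.randn(512, 512)  # (out_features, in_features)
dequantized, lora_A, lora_B = loftq_init(weight, num_bits=4, reduced_rank=16, num_iter=1)
# `dequantized` approximates `weight`; lora_A / lora_B hold the low-rank residual factors.
print(lora_A.shape, lora_B.shape)  # torch.Size([16, 512]) torch.Size([512, 16])
```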
peft/src/peft/utils/loftq_utils.py/0
{ "file_path": "peft/src/peft/utils/loftq_utils.py", "repo_id": "peft", "token_count": 4009 }
162
include timm/models/_pruned/*.txt
include timm/data/_info/*.txt
include timm/data/_info/*.json
pytorch-image-models/MANIFEST.in/0
{ "file_path": "pytorch-image-models/MANIFEST.in", "repo_id": "pytorch-image-models", "token_count": 34 }
163
# FBNet **FBNet** is a type of convolutional neural architectures discovered through [DNAS](https://paperswithcode.com/method/dnas) neural architecture search. It utilises a basic type of image model block inspired by [MobileNetv2](https://paperswithcode.com/method/mobilenetv2) that utilises depthwise convolutions and an inverted residual structure (see components). The principal building block is the [FBNet Block](https://paperswithcode.com/method/fbnet-block). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{wu2019fbnet, title={FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search}, author={Bichen Wu and Xiaoliang Dai and Peizhao Zhang and Yanghan Wang and Fei Sun and Yiming Wu and Yuandong Tian and Peter Vajda and Yangqing Jia and Kurt Keutzer}, year={2019}, eprint={1812.03443}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: FBNet Paper: Title: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search' URL: https://paperswithcode.com/paper/fbnet-hardware-aware-efficient-convnet-design Models: - Name: fbnetc_100 In Collection: FBNet Metadata: FLOPs: 508940064 Parameters: 5570000 File Size: 22525094 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Dropout - FBNet Block - Global Average Pooling - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: fbnetc_100 LR: 0.1 Epochs: 360 Layers: 22 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0005 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L985 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.12% Top 5 Accuracy: 92.37% -->
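Since this template only pulls usage code in via `{% include 'code_snippets.md' %}`, here is a minimal sketch of the pattern that include renders on the other model pages in this repo, using the variant id listed in the metadata above:

```python
import timm

model = timm.create_model('fbnetc_100', pretrained=True)
model.eval()
```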
pytorch-image-models/docs/models/.templates/models/fbnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/fbnet.md", "repo_id": "pytorch-image-models", "token_count": 896 }
164
# MnasNet **MnasNet** is a type of convolutional neural network optimized for mobile devices that is discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mnasnet, title={MnasNet: Platform-Aware Neural Architecture Search for Mobile}, author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. Le}, year={2019}, eprint={1807.11626}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: MNASNet Paper: Title: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile' URL: https://paperswithcode.com/paper/mnasnet-platform-aware-neural-architecture Models: - Name: mnasnet_100 In Collection: MNASNet Metadata: FLOPs: 416415488 Parameters: 4380000 File Size: 17731774 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Global Average Pooling - Inverted Residual Block - Max Pooling - ReLU - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet ID: mnasnet_100 Layers: 100 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4000 Image Size: '224' Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L894 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.67% Top 5 Accuracy: 92.1% - Name: semnasnet_100 In Collection: MNASNet Metadata: FLOPs: 414570766 Parameters: 3890000 File Size: 15731489 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Global Average Pooling - Inverted Residual Block - Max Pooling - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Data: - ImageNet ID: semnasnet_100 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L928 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.45% Top 5 Accuracy: 92.61% -->
pytorch-image-models/docs/models/.templates/models/mnasnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/mnasnet.md", "repo_id": "pytorch-image-models", "token_count": 1292 }
165
# SelecSLS **SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{Mehta_2020, title={XNect}, volume={39}, ISSN={1557-7368}, url={http://dx.doi.org/10.1145/3386569.3392410}, DOI={10.1145/3386569.3392410}, number={4}, journal={ACM Transactions on Graphics}, publisher={Association for Computing Machinery (ACM)}, author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, year={2020}, month={Jul} } ``` <!-- Type: model-index Collections: - Name: SelecSLS Paper: Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera' URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose Models: - Name: selecsls42b In Collection: SelecSLS Metadata: FLOPs: 3824022528 Parameters: 32460000 File Size: 129948954 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls42b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.18% Top 5 Accuracy: 93.39% - Name: selecsls60 In Collection: SelecSLS Metadata: FLOPs: 4610472600 Parameters: 30670000 File Size: 122839714 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls60 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.99% Top 5 Accuracy: 93.83% - Name: selecsls60b In Collection: SelecSLS Metadata: FLOPs: 4657653144 Parameters: 32770000 File Size: 131252898 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls60b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.41% Top 5 Accuracy: 94.18% -->
pytorch-image-models/docs/models/.templates/models/selecsls.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/selecsls.md", "repo_id": "pytorch-image-models", "token_count": 1608 }
166
# Vision Transformer (ViT) The **Vision Transformer** is a model for image classification that employs a Transformer-like architecture over patches of the image. This includes the use of [Multi-Head Attention](https://paperswithcode.com/method/multi-head-attention), [Scaled Dot-Product Attention](https://paperswithcode.com/method/scaled) and other architectural features seen in the [Transformer](https://paperswithcode.com/method/transformer) architecture traditionally used for NLP. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{dosovitskiy2020image, title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, year={2020}, eprint={2010.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Vision Transformer Paper: Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale' URL: https://paperswithcode.com/paper/an-image-is-worth-16x16-words-transformers-1 Models: - Name: vit_base_patch16_224 In Collection: Vision Transformer Metadata: FLOPs: 67394605056 Parameters: 86570000 File Size: 346292833 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_patch16_224 LR: 0.0008 Epochs: 90 Dropout: 0.0 Crop Pct: '0.9' Batch Size: 4096 Image Size: '224' Warmup Steps: 10000 Weight Decay: 0.03 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L503 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.78% Top 5 Accuracy: 96.13% - Name: vit_base_patch16_384 In Collection: Vision Transformer Metadata: FLOPs: 49348245504 Parameters: 86860000 File Size: 347460194 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_patch16_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L522 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.2% Top 5 Accuracy: 97.22% - Name: vit_base_patch32_384 In Collection: Vision Transformer Metadata: FLOPs: 12656142336 Parameters: 88300000 File Size: 353210979 
Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_patch32_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L532 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.66% Top 5 Accuracy: 96.13% - Name: vit_base_resnet50_384 In Collection: Vision Transformer Metadata: FLOPs: 49461491712 Parameters: 98950000 File Size: 395854632 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_base_resnet50_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L653 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.99% Top 5 Accuracy: 97.3% - Name: vit_large_patch16_224 In Collection: Vision Transformer Metadata: FLOPs: 119294746624 Parameters: 304330000 File Size: 1217350532 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_large_patch16_224 Crop Pct: '0.9' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 0.0 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L542 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.06% Top 5 Accuracy: 96.44% - Name: vit_large_patch16_384 In Collection: Vision Transformer Metadata: FLOPs: 174702764032 Parameters: 304720000 File Size: 1218907013 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_large_patch16_384 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 512 Image Size: '384' Weight Decay: 0.0 Interpolation: bicubic Code: 
https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L561 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.17% Top 5 Accuracy: 97.36% - Name: vit_small_patch16_224 In Collection: Vision Transformer Metadata: FLOPs: 28236450816 Parameters: 48750000 File Size: 195031454 Architecture: - Attention Dropout - Convolution - Dense Connections - Dropout - GELU - Layer Normalization - Multi-Head Attention - Scaled Dot-Product Attention - Tanh Activation Tasks: - Image Classification Training Techniques: - Cosine Annealing - Gradient Clipping - SGD with Momentum Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 ID: vit_small_patch16_224 Crop Pct: '0.9' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py#L490 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.85% Top 5 Accuracy: 93.42% -->
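A quick arithmetic check of the patch tokenization described in the introduction above, for `vit_base_patch16_224` (pure illustration, no timm call):

```python
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 image patches
seq_len = num_patches + 1                      # plus one class token -> 197
```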
pytorch-image-models/docs/models/.templates/models/vision-transformer.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/vision-transformer.md", "repo_id": "pytorch-image-models", "token_count": 3834 }
167
# TResNet A **TResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that aim to boost accuracy while maintaining GPU training and inference efficiency. They contain several design tricks including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, Blocks selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? To load a pretrained model: ```python import timm model = timm.create_model('tresnet_l', pretrained=True) model.eval() ``` To load and preprocess the image: ```python import urllib from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform config = resolve_data_config({}, model=model) transform = create_transform(**config) url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") urllib.request.urlretrieve(url, filename) img = Image.open(filename).convert('RGB') tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```python import torch with torch.no_grad(): out = model(tensor) probabilities = torch.nn.functional.softmax(out[0], dim=0) print(probabilities.shape) # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```python # Get imagenet class mappings url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") urllib.request.urlretrieve(url, filename) with open("imagenet_classes.txt", "r") as f: categories = [s.strip() for s in f.readlines()] # Print top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): print(categories[top5_catid[i]], top5_prob[i].item()) # prints class names and probabilities like: # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```python model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
## Citation ```BibTeX @misc{ridnik2020tresnet, title={TResNet: High Performance GPU-Dedicated Architecture}, author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman}, year={2020}, eprint={2003.13630}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: TResNet Paper: Title: 'TResNet: High Performance GPU-Dedicated Architecture' URL: https://paperswithcode.com/paper/tresnet-high-performance-gpu-dedicated Models: - Name: tresnet_l In Collection: TResNet Metadata: FLOPs: 10873416792 Parameters: 53456696 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_l LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L267 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.49% Top 5 Accuracy: 95.62% - Name: tresnet_l_448 In Collection: TResNet Metadata: FLOPs: 43488238584 Parameters: 53456696 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_l_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L285 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.26% Top 5 Accuracy: 95.98% - Name: tresnet_m In Collection: TResNet Metadata: FLOPs: 5733048064 Parameters: 41282200 File Size: 125861314 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs Training Time: < 24 hours ID: tresnet_m LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L261 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.8% Top 5 Accuracy: 94.86% - Name: tresnet_m_448 In Collection: TResNet Metadata: FLOPs: 22929743104 
Parameters: 29278464 File Size: 125861314 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_m_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L279 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.72% Top 5 Accuracy: 95.57% - Name: tresnet_xl In Collection: TResNet Metadata: FLOPs: 15162534034 Parameters: 75646610 File Size: 314378965 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_xl LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L273 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.05% Top 5 Accuracy: 95.93% - Name: tresnet_xl_448 In Collection: TResNet Metadata: FLOPs: 60641712730 Parameters: 75646610 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_xl_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L291 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.06% Top 5 Accuracy: 96.19% -->
pytorch-image-models/docs/models/tresnet.md/0
{ "file_path": "pytorch-image-models/docs/models/tresnet.md", "repo_id": "pytorch-image-models", "token_count": 4197 }
168
# Big Transfer (BiT) **Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The finetuned models contained in this collection are finetuned on ImageNet. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @misc{kolesnikov2020big, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, year={2020}, eprint={1912.11370}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Big Transfer Paper: Title: 'Big Transfer (BiT): General Visual Representation Learning' URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual Models: - Name: resnetv2_101x1_bitm In Collection: Big Transfer Metadata: FLOPs: 5330896 Parameters: 44540000 File Size: 178256468 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_101x1_bitm LR: 0.03 Epochs: 90 Layers: 101 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444 Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.21% Top 5 Accuracy: 96.47% - Name: resnetv2_101x3_bitm In Collection: Big Transfer Metadata: FLOPs: 15988688 Parameters: 387930000 File Size: 1551830100 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_101x3_bitm LR: 0.03 Epochs: 90 Layers: 101 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451 Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.38% Top 5 Accuracy: 97.37% - Name: resnetv2_152x2_bitm In Collection: Big Transfer Metadata: FLOPs: 10659792 Parameters: 236340000 File Size: 945476668 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M ID: resnetv2_152x2_bitm Crop Pct: '1.0' Image Size: '480' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458 Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.4% Top 5 Accuracy: 97.43% - Name: resnetv2_152x4_bitm In Collection: Big Transfer Metadata: FLOPs: 21317584 Parameters: 936530000 File Size: 3746270104 Architecture: 
- 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_152x4_bitm Crop Pct: '1.0' Image Size: '480' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465 Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.95% Top 5 Accuracy: 97.45% - Name: resnetv2_50x1_bitm In Collection: Big Transfer Metadata: FLOPs: 5330896 Parameters: 25550000 File Size: 102242668 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_50x1_bitm LR: 0.03 Epochs: 90 Layers: 50 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430 Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.19% Top 5 Accuracy: 95.63% - Name: resnetv2_50x3_bitm In Collection: Big Transfer Metadata: FLOPs: 15988688 Parameters: 217320000 File Size: 869321580 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_50x3_bitm LR: 0.03 Epochs: 90 Layers: 50 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437 Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.75% Top 5 Accuracy: 97.12% -->
pytorch-image-models/hfdocs/source/models/big-transfer.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/big-transfer.mdx", "repo_id": "pytorch-image-models", "token_count": 4101 }
169
# Noisy Student (EfficientNet) **Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps: 1. train a teacher model on labeled images 2. use the teacher to generate pseudo labels on unlabeled images 3. train a student model on the combination of labeled images and pseudo labeled images. The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student. Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
```py >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. ## Citation ```BibTeX @misc{xie2020selftraining, title={Self-training with Noisy Student improves ImageNet classification}, author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le}, year={2020}, eprint={1911.04252}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: Noisy Student Paper: Title: Self-training with Noisy Student improves ImageNet classification URL: https://paperswithcode.com/paper/self-training-with-noisy-student-improves Models: - Name: tf_efficientnet_b0_ns In Collection: Noisy Student Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21386709 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b0_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1427 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.66% Top 5 Accuracy: 94.37% - Name: tf_efficientnet_b1_ns In Collection: Noisy Student Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31516408 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b1_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1437 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.39% Top 5 Accuracy: 95.74% - Name: tf_efficientnet_b2_ns In Collection: Noisy Student Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36801803 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - 
Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b2_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1447 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.39% Top 5 Accuracy: 96.24% - Name: tf_efficientnet_b3_ns In Collection: Noisy Student Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49385734 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b3_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1457 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.04% Top 5 Accuracy: 96.91% - Name: tf_efficientnet_b4_ns In Collection: Noisy Student Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77995057 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b4_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1467 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.15% Top 5 Accuracy: 97.47% - Name: tf_efficientnet_b5_ns In Collection: Noisy Student Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122404944 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish 
Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b5_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1477 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.08% Top 5 Accuracy: 97.75% - Name: tf_efficientnet_b6_ns In Collection: Noisy Student Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173239537 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b6_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1487 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.45% Top 5 Accuracy: 97.88% - Name: tf_efficientnet_b7_ns In Collection: Noisy Student Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266853140 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b7_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1498 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.83% Top 5 Accuracy: 98.08% - Name: tf_efficientnet_l2_ns In Collection: Noisy Student Metadata: FLOPs: 611646113804 Parameters: 480310000 File Size: 1925950424 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification 
Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod Training Time: 6 days ID: tf_efficientnet_l2_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.96' Momentum: 0.9 Batch Size: 2048 Image Size: '800' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1520 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 88.35% Top 5 Accuracy: 98.66% -->
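As a recap of the three-step procedure described at the top of this page, the overall training loop can be sketched in pseudocode. Everything here is schematic: `train`, `pseudo_label`, and the dataset objects are hypothetical placeholders, not timm or paper APIs.

```py
def noisy_student(labeled, unlabeled, teacher_arch, student_archs, iterations=3):
    # step 1: train a teacher on labeled data only (no noise)
    teacher = train(teacher_arch, labeled, noise=False)
    for i in range(iterations):
        # step 2: the teacher generates pseudo labels for the unlabeled images
        pseudo = pseudo_label(teacher, unlabeled)
        # step 3: train an equal-or-larger student on labeled + pseudo labeled data,
        # with input noise (RandAugment) and model noise (dropout, stochastic depth)
        student = train(student_archs[i], labeled + pseudo, noise=True)
        teacher = student  # iterate: the student becomes the new teacher
    return teacher
```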
pytorch-image-models/hfdocs/source/models/noisy-student.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/noisy-student.mdx", "repo_id": "pytorch-image-models", "token_count": 6683 }
170
# Optimization

This page contains the API reference documentation for the optimizers included in `timm`.

## Optimizers

### Factory functions

[[autodoc]] timm.optim.optim_factory.create_optimizer
[[autodoc]] timm.optim.optim_factory.create_optimizer_v2

### Optimizer Classes

[[autodoc]] timm.optim.adabelief.AdaBelief
[[autodoc]] timm.optim.adafactor.Adafactor
[[autodoc]] timm.optim.adahessian.Adahessian
[[autodoc]] timm.optim.adamp.AdamP
[[autodoc]] timm.optim.adamw.AdamW
[[autodoc]] timm.optim.lamb.Lamb
[[autodoc]] timm.optim.lars.Lars
[[autodoc]] timm.optim.lookahead.Lookahead
[[autodoc]] timm.optim.madgrad.MADGRAD
[[autodoc]] timm.optim.nadam.Nadam
[[autodoc]] timm.optim.nvnovograd.NvNovoGrad
[[autodoc]] timm.optim.radam.RAdam
[[autodoc]] timm.optim.rmsprop_tf.RMSpropTF
[[autodoc]] timm.optim.sgdp.SGDP
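In practice most users go through the factory functions rather than instantiating the classes directly. A brief, illustrative sketch (the hyperparameter values are arbitrary examples, not recommendations):

```py
import timm
import timm.optim

model = timm.create_model('resnet50')

# resolve an optimizer by name via the factory
optimizer = timm.optim.create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)

# the optimizer classes can also be used directly, like any torch.optim optimizer
optimizer = timm.optim.AdamP(model.parameters(), lr=1e-3, weight_decay=0.01)
```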
pytorch-image-models/hfdocs/source/reference/optimizers.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/reference/optimizers.mdx", "repo_id": "pytorch-image-models", "token_count": 333 }
171
import os
from typing import Optional

from .reader_image_folder import ReaderImageFolder
from .reader_image_in_tar import ReaderImageInTar


def create_reader(
        name: str,
        root: Optional[str] = None,
        split: str = 'train',
        **kwargs,
):
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    name = name.lower()
    name = name.split('/', 1)
    prefix = ''
    if len(name) > 1:
        prefix = name[0]
    name = name[-1]

    # FIXME improve the selection right now just tfds prefix or fallback path, will need options to
    #  explicitly select other options shortly
    if prefix == 'hfds':
        from .reader_hfds import ReaderHfds  # defer Hf datasets import
        reader = ReaderHfds(name=name, root=root, split=split, **kwargs)
    elif prefix == 'hfids':
        from .reader_hfids import ReaderHfids  # defer HF datasets import
        reader = ReaderHfids(name=name, root=root, split=split, **kwargs)
    elif prefix == 'tfds':
        from .reader_tfds import ReaderTfds  # defer tensorflow import
        reader = ReaderTfds(name=name, root=root, split=split, **kwargs)
    elif prefix == 'wds':
        from .reader_wds import ReaderWds
        kwargs.pop('download', False)
        reader = ReaderWds(root=root, name=name, split=split, **kwargs)
    else:
        assert os.path.exists(root)
        # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder
        # FIXME support split here or in reader?
        if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
            reader = ReaderImageInTar(root, **kwargs)
        else:
            reader = ReaderImageFolder(root, **kwargs)
    return reader
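# Illustrative usage sketch (not executed here; paths and dataset names are hypothetical,
# and in normal use this factory is called indirectly via timm.data.create_dataset):
#
#   from timm.data.readers.reader_factory import create_reader
#
#   # no prefix: `root` must exist; a .tar file selects ReaderImageInTar, a folder ReaderImageFolder
#   reader = create_reader('', root='/data/imagenet/train')
#
#   # prefixed names select other backends, e.g. 'hfds/', 'hfids/', 'tfds/' or 'wds/'
#   reader = create_reader('hfds/imagenet-1k', root=None, split='train')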
pytorch-image-models/timm/data/readers/reader_factory.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_factory.py", "repo_id": "pytorch-image-models", "token_count": 694 }
172
""" Activations (memory-efficient w/ custom autograd) A collection of activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. These activations are not compatible with jit scripting or ONNX export of the model, please use either the JIT or basic versions of the activations. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit_fwd(x): return x.mul(torch.sigmoid(x)) @torch.jit.script def swish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) class SwishJitAutoFn(torch.autograd.Function): """ torch.jit.script optimised Swish w/ memory-efficient checkpoint Inspired by conversation btw Jeremy Howard & Adam Pazske https://twitter.com/jeremyphoward/status/1188251041835315200 """ @staticmethod def symbolic(g, x): return g.op("Mul", x, g.op("Sigmoid", x)) @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return swish_jit_bwd(x, grad_output) def swish_me(x, inplace=False): return SwishJitAutoFn.apply(x) class SwishMe(nn.Module): def __init__(self, inplace: bool = False): super(SwishMe, self).__init__() def forward(self, x): return SwishJitAutoFn.apply(x) @torch.jit.script def mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x))) @torch.jit.script def mish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) x_tanh_sp = F.softplus(x).tanh() return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) class MishJitAutoFn(torch.autograd.Function): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 A memory efficient, jit scripted variant of Mish """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return mish_jit_bwd(x, grad_output) def mish_me(x, inplace=False): return MishJitAutoFn.apply(x) class MishMe(nn.Module): def __init__(self, inplace: bool = False): super(MishMe, self).__init__() def forward(self, x): return MishJitAutoFn.apply(x) @torch.jit.script def hard_sigmoid_jit_fwd(x, inplace: bool = False): return (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_sigmoid_jit_bwd(x, grad_output): m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. return grad_output * m class HardSigmoidJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_sigmoid_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_sigmoid_jit_bwd(x, grad_output) def hard_sigmoid_me(x, inplace: bool = False): return HardSigmoidJitAutoFn.apply(x) class HardSigmoidMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidMe, self).__init__() def forward(self, x): return HardSigmoidJitAutoFn.apply(x) @torch.jit.script def hard_swish_jit_fwd(x): return x * (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_swish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= 3.) m = torch.where((x >= -3.) & (x <= 3.), x / 3. 
+ .5, m) return grad_output * m class HardSwishJitAutoFn(torch.autograd.Function): """A memory efficient, jit-scripted HardSwish activation""" @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_swish_jit_bwd(x, grad_output) @staticmethod def symbolic(g, self): input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) return g.op("Mul", self, hardtanh_) def hard_swish_me(x, inplace=False): return HardSwishJitAutoFn.apply(x) class HardSwishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishMe, self).__init__() def forward(self, x): return HardSwishJitAutoFn.apply(x) @torch.jit.script def hard_mish_jit_fwd(x): return 0.5 * x * (x + 2).clamp(min=0, max=2) @torch.jit.script def hard_mish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= -2.) m = torch.where((x >= -2.) & (x <= 0.), x + 1., m) return grad_output * m class HardMishJitAutoFn(torch.autograd.Function): """ A memory efficient, jit scripted variant of Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_mish_jit_bwd(x, grad_output) def hard_mish_me(x, inplace: bool = False): return HardMishJitAutoFn.apply(x) class HardMishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardMishMe, self).__init__() def forward(self, x): return HardMishJitAutoFn.apply(x)
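# Illustrative consistency check (a sketch, not part of the library): the memory-efficient
# autograd versions should match the straightforward formulations in value and gradient.
#
#   import torch
#   from timm.layers.activations_me import swish_me, mish_me
#
#   x = torch.randn(8, requires_grad=True)
#   assert torch.allclose(swish_me(x), x * torch.sigmoid(x))
#   assert torch.allclose(mish_me(x), x * torch.nn.functional.softplus(x).tanh())
#   swish_me(x).sum().backward()   # backward recomputes sigmoid(x) from the saved input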
pytorch-image-models/timm/layers/activations_me.py/0
{ "file_path": "pytorch-image-models/timm/layers/activations_me.py", "repo_id": "pytorch-image-models", "token_count": 2598 }
173
""" NormAct (Normalizaiton + Activation Layer) Factory Create norm + act combo modules that attempt to be backwards compatible with separate norm + act isntances in models. Where these are used it will be possible to swap separate BN + act layers with combined modules like IABN or EvoNorms. Hacked together by / Copyright 2020 Ross Wightman """ import types import functools from .evo_norm import * from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d from .inplace_abn import InplaceAbn _NORM_ACT_MAP = dict( batchnorm=BatchNormAct2d, batchnorm2d=BatchNormAct2d, groupnorm=GroupNormAct, groupnorm1=functools.partial(GroupNormAct, num_groups=1), layernorm=LayerNormAct, layernorm2d=LayerNormAct2d, evonormb0=EvoNorm2dB0, evonormb1=EvoNorm2dB1, evonormb2=EvoNorm2dB2, evonorms0=EvoNorm2dS0, evonorms0a=EvoNorm2dS0a, evonorms1=EvoNorm2dS1, evonorms1a=EvoNorm2dS1a, evonorms2=EvoNorm2dS2, evonorms2a=EvoNorm2dS2a, frn=FilterResponseNormAct2d, frntlu=FilterResponseNormTlu2d, inplaceabn=InplaceAbn, iabn=InplaceAbn, ) _NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} # has act_layer arg to define act type _NORM_ACT_REQUIRES_ARG = { BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): layer = get_norm_act_layer(layer_name, act_layer=act_layer) layer_instance = layer(num_features, apply_act=apply_act, **kwargs) if jit: layer_instance = torch.jit.script(layer_instance) return layer_instance def get_norm_act_layer(norm_layer, act_layer=None): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) norm_act_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_act_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '').lower().split('-')[0] norm_act_layer = _NORM_ACT_MAP[layer_name] elif norm_layer in _NORM_ACT_TYPES: norm_act_layer = norm_layer elif isinstance(norm_layer, types.FunctionType): # if function type, must be a lambda/fn that creates a norm_act layer norm_act_layer = norm_layer else: type_name = norm_layer.__name__.lower() if type_name.startswith('batchnorm'): norm_act_layer = BatchNormAct2d elif type_name.startswith('groupnorm'): norm_act_layer = GroupNormAct elif type_name.startswith('groupnorm1'): norm_act_layer = functools.partial(GroupNormAct, num_groups=1) elif type_name.startswith('layernorm2d'): norm_act_layer = LayerNormAct2d elif type_name.startswith('layernorm'): norm_act_layer = LayerNormAct else: assert False, f"No equivalent norm_act layer for {type_name}" if norm_act_layer in _NORM_ACT_REQUIRES_ARG: # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types norm_act_kwargs.setdefault('act_layer', act_layer) if norm_act_kwargs: norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args return norm_act_layer
pytorch-image-models/timm/layers/create_norm_act.py/0
{ "file_path": "pytorch-image-models/timm/layers/create_norm_act.py", "repo_id": "pytorch-image-models", "token_count": 1594 }
174
""" Linear layer (alternate definition) """ import torch import torch.nn.functional as F from torch import nn as nn class Linear(nn.Linear): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. """ def forward(self, input: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting(): bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias)
pytorch-image-models/timm/layers/linear.py/0
{ "file_path": "pytorch-image-models/timm/layers/linear.py", "repo_id": "pytorch-image-models", "token_count": 282 }
175
""" Depthwise Separable Conv Modules Basic DWS convs. Other variations of DWS exist with batch norm or activations between the DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. Hacked together by / Copyright 2020 Ross Wightman """ from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class SeparableConvNormAct(nn.Module): """ Separable Conv w/ trailing Norm and Activation """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, apply_act=True, drop_layer=None): super(SeparableConvNormAct, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) norm_act_layer = get_norm_act_layer(norm_layer, act_layer) norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) x = self.bn(x) return x SeparableConvBnAct = SeparableConvNormAct class SeparableConv2d(nn.Module): """ Separable Conv """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1): super(SeparableConv2d, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) return x
pytorch-image-models/timm/layers/separable_conv.py/0
{ "file_path": "pytorch-image-models/timm/layers/separable_conv.py", "repo_id": "pytorch-image-models", "token_count": 1138 }
176
import dataclasses import logging import os from copy import deepcopy from typing import Optional, Dict, Callable, Any, Tuple from torch import nn as nn from torch.hub import load_state_dict_from_url from timm.models._features import FeatureListNet, FeatureHookNet from timm.models._features_fx import FeatureGraphNet from timm.models._helpers import load_state_dict from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf from timm.models._manipulate import adapt_input_conv from timm.models._pretrained import PretrainedCfg from timm.models._prune import adapt_model_from_file from timm.models._registry import get_pretrained_cfg _logger = logging.getLogger(__name__) # Global variables for rarely used pretrained checkpoint download progress and hash check. # Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle. _DOWNLOAD_PROGRESS = False _CHECK_HASH = False _USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0 __all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained', 'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg'] def _resolve_pretrained_source(pretrained_cfg): cfg_source = pretrained_cfg.get('source', '') pretrained_url = pretrained_cfg.get('url', None) pretrained_file = pretrained_cfg.get('file', None) pretrained_sd = pretrained_cfg.get('state_dict', None) hf_hub_id = pretrained_cfg.get('hf_hub_id', None) # resolve where to load pretrained weights from load_from = '' pretrained_loc = '' if cfg_source == 'hf-hub' and has_hf_hub(necessary=True): # hf-hub specified as source via model identifier load_from = 'hf-hub' assert hf_hub_id pretrained_loc = hf_hub_id else: # default source == timm or unspecified if pretrained_sd: # direct state_dict pass through is the highest priority load_from = 'state_dict' pretrained_loc = pretrained_sd assert isinstance(pretrained_loc, dict) elif pretrained_file: # file load override is the second-highest priority if set load_from = 'file' pretrained_loc = pretrained_file else: old_cache_valid = False if _USE_OLD_CACHE: # prioritized old cached weights if exists and env var enabled old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True): # hf-hub available as alternate weight source in default_cfg load_from = 'hf-hub' pretrained_loc = hf_hub_id elif pretrained_url: load_from = 'url' pretrained_loc = pretrained_url if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None): # if a filename override is set, return tuple for location w/ (hub_id, filename) pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename'] return load_from, pretrained_loc def set_pretrained_download_progress(enable=True): """ Set download progress for pretrained weights on/off (globally). """ global _DOWNLOAD_PROGRESS _DOWNLOAD_PROGRESS = enable def set_pretrained_check_hash(enable=True): """ Set hash checking for pretrained weights on/off (globally). """ global _CHECK_HASH _CHECK_HASH = enable def load_custom_pretrained( model: nn.Module, pretrained_cfg: Optional[Dict] = None, load_fn: Optional[Callable] = None, ): r"""Loads a custom (read non .pth) weight file Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls a passed in custom load fun, or the `load_pretrained` model member fn. If the object is already present in `model_dir`, it's deserialized and returned. 
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`. Args: model: The instantiated model to load weights into pretrained_cfg (dict): Default pretrained model cfg load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named 'laod_pretrained' on the model will be called if it exists """ pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) if not pretrained_cfg: _logger.warning("Invalid pretrained config, cannot load weights.") return load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) if not load_from: _logger.warning("No pretrained weights exist for this model. Using random initialization.") return if load_from == 'hf-hub': _logger.warning("Hugging Face hub not currently supported for custom load pretrained models.") elif load_from == 'url': pretrained_loc = download_cached_file( pretrained_loc, check_hash=_CHECK_HASH, progress=_DOWNLOAD_PROGRESS, ) if load_fn is not None: load_fn(model, pretrained_loc) elif hasattr(model, 'load_pretrained'): model.load_pretrained(pretrained_loc) else: _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") def load_pretrained( model: nn.Module, pretrained_cfg: Optional[Dict] = None, num_classes: int = 1000, in_chans: int = 3, filter_fn: Optional[Callable] = None, strict: bool = True, ): """ Load pretrained checkpoint Args: model (nn.Module) : PyTorch model module pretrained_cfg (Optional[Dict]): configuration for pretrained weights / target dataset num_classes (int): num_classes for target model in_chans (int): in_chans for target model filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) strict (bool): strict load of checkpoint """ pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) if not pretrained_cfg: raise RuntimeError("Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.") load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) if load_from == 'state_dict': _logger.info(f'Loading pretrained weights from state dict') state_dict = pretrained_loc # pretrained_loc is the actual state dict for this override elif load_from == 'file': _logger.info(f'Loading pretrained weights from file ({pretrained_loc})') if pretrained_cfg.get('custom_load', False): model.load_pretrained(pretrained_loc) return else: state_dict = load_state_dict(pretrained_loc) elif load_from == 'url': _logger.info(f'Loading pretrained weights from url ({pretrained_loc})') if pretrained_cfg.get('custom_load', False): pretrained_loc = download_cached_file( pretrained_loc, progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH, ) model.load_pretrained(pretrained_loc) return else: state_dict = load_state_dict_from_url( pretrained_loc, map_location='cpu', progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH, ) elif load_from == 'hf-hub': _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})') if isinstance(pretrained_loc, (list, tuple)): state_dict = load_state_dict_from_hf(*pretrained_loc) else: state_dict = load_state_dict_from_hf(pretrained_loc) else: model_name = pretrained_cfg.get('architecture', 'this model') raise RuntimeError(f"No pretrained weights exist for {model_name}. 
Use `pretrained=False` for random init.") if filter_fn is not None: try: state_dict = filter_fn(state_dict, model) except TypeError as e: # for backwards compat with filter fn that take one arg state_dict = filter_fn(state_dict) input_convs = pretrained_cfg.get('first_conv', None) if input_convs is not None and in_chans != 3: if isinstance(input_convs, str): input_convs = (input_convs,) for input_conv_name in input_convs: weight_name = input_conv_name + '.weight' try: state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) _logger.info( f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') except NotImplementedError as e: del state_dict[weight_name] strict = False _logger.warning( f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') classifiers = pretrained_cfg.get('classifier', None) label_offset = pretrained_cfg.get('label_offset', 0) if classifiers is not None: if isinstance(classifiers, str): classifiers = (classifiers,) if num_classes != pretrained_cfg['num_classes']: for classifier_name in classifiers: # completely discard fully connected if model num_classes doesn't match pretrained weights state_dict.pop(classifier_name + '.weight', None) state_dict.pop(classifier_name + '.bias', None) strict = False elif label_offset > 0: for classifier_name in classifiers: # special case for pretrained weights with an extra background class in pretrained weights classifier_weight = state_dict[classifier_name + '.weight'] state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] classifier_bias = state_dict[classifier_name + '.bias'] state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] load_result = model.load_state_dict(state_dict, strict=strict) if load_result.missing_keys: _logger.info( f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.' f' This is expected if model is being adapted.') if load_result.unexpected_keys: _logger.warning( f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.' f' This may be expected if model is being adapted.') def pretrained_cfg_for_features(pretrained_cfg): pretrained_cfg = deepcopy(pretrained_cfg) # remove default pretrained cfg fields that don't have much relevance for feature backbone to_remove = ('num_classes', 'classifier', 'global_pool') # add default final pool size? 
for tr in to_remove: pretrained_cfg.pop(tr, None) return pretrained_cfg def _filter_kwargs(kwargs, names): if not kwargs or not names: return for n in names: kwargs.pop(n, None) def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter): """ Update the default_cfg and kwargs before passing to model Args: pretrained_cfg: input pretrained cfg (updated in-place) kwargs: keyword args passed to model build fn (updated in-place) kwargs_filter: keyword arg keys that must be removed before model __init__ """ # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') if pretrained_cfg.get('fixed_input_size', False): # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size default_kwarg_names += ('img_size',) for n in default_kwarg_names: # for legacy reasons, model __init__args uses img_size + in_chans as separate args while # pretrained_cfg has one input_size=(C, H ,W) entry if n == 'img_size': input_size = pretrained_cfg.get('input_size', None) if input_size is not None: assert len(input_size) == 3 kwargs.setdefault(n, input_size[-2:]) elif n == 'in_chans': input_size = pretrained_cfg.get('input_size', None) if input_size is not None: assert len(input_size) == 3 kwargs.setdefault(n, input_size[0]) elif n == 'num_classes': default_val = pretrained_cfg.get(n, None) # if default is < 0, don't pass through to model if default_val is not None and default_val >= 0: kwargs.setdefault(n, pretrained_cfg[n]) else: default_val = pretrained_cfg.get(n, None) if default_val is not None: kwargs.setdefault(n, pretrained_cfg[n]) # Filter keyword args for task specific model variants (some 'features only' models, etc.) _filter_kwargs(kwargs, names=kwargs_filter) def resolve_pretrained_cfg( variant: str, pretrained_cfg=None, pretrained_cfg_overlay=None, ) -> PretrainedCfg: model_with_tag = variant pretrained_tag = None if pretrained_cfg: if isinstance(pretrained_cfg, dict): # pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg pretrained_cfg = PretrainedCfg(**pretrained_cfg) elif isinstance(pretrained_cfg, str): pretrained_tag = pretrained_cfg pretrained_cfg = None # fallback to looking up pretrained cfg in model registry by variant identifier if not pretrained_cfg: if pretrained_tag: model_with_tag = '.'.join([variant, pretrained_tag]) pretrained_cfg = get_pretrained_cfg(model_with_tag) if not pretrained_cfg: _logger.warning( f"No pretrained configuration specified for {model_with_tag} model. Using a default." 
f" Please add a config to the model pretrained_cfg registry or pass explicitly.") pretrained_cfg = PretrainedCfg() # instance with defaults pretrained_cfg_overlay = pretrained_cfg_overlay or {} if not pretrained_cfg.architecture: pretrained_cfg_overlay.setdefault('architecture', variant) pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay) return pretrained_cfg def build_model_with_cfg( model_cls: Callable, variant: str, pretrained: bool, pretrained_cfg: Optional[Dict] = None, pretrained_cfg_overlay: Optional[Dict] = None, model_cfg: Optional[Any] = None, feature_cfg: Optional[Dict] = None, pretrained_strict: bool = True, pretrained_filter_fn: Optional[Callable] = None, kwargs_filter: Optional[Tuple[str]] = None, **kwargs, ): """ Build model with specified default_cfg and optional model_cfg This helper fn aids in the construction of a model including: * handling default_cfg and associated pretrained weight loading * passing through optional model_cfg for models with config based arch spec * features_only model adaptation * pruning config / model adaptation Args: model_cls (nn.Module): model class variant (str): model variant name pretrained (bool): load pretrained weights pretrained_cfg (dict): model's pretrained weight/task config model_cfg (Optional[Dict]): model's architecture config feature_cfg (Optional[Dict]: feature extraction adapter config pretrained_strict (bool): load pretrained weights strictly pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model **kwargs: model args passed through to model __init__ """ pruned = kwargs.pop('pruned', False) features = False feature_cfg = feature_cfg or {} # resolve and update model pretrained config and model kwargs pretrained_cfg = resolve_pretrained_cfg( variant, pretrained_cfg=pretrained_cfg, pretrained_cfg_overlay=pretrained_cfg_overlay ) # FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model pretrained_cfg = pretrained_cfg.to_dict() _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter) # Setup for feature extraction wrapper done at end of this fn if kwargs.pop('features_only', False): features = True feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) if 'out_indices' in kwargs: feature_cfg['out_indices'] = kwargs.pop('out_indices') # Instantiate the model if model_cfg is None: model = model_cls(**kwargs) else: model = model_cls(cfg=model_cfg, **kwargs) model.pretrained_cfg = pretrained_cfg model.default_cfg = model.pretrained_cfg # alias for backwards compat if pruned: model = adapt_model_from_file(model, variant) # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) if pretrained: load_pretrained( model, pretrained_cfg=pretrained_cfg, num_classes=num_classes_pretrained, in_chans=kwargs.get('in_chans', 3), filter_fn=pretrained_filter_fn, strict=pretrained_strict, ) # Wrap the model in a feature extraction module if enabled if features: feature_cls = FeatureListNet output_fmt = getattr(model, 'output_fmt', None) if output_fmt is not None: feature_cfg.setdefault('output_fmt', output_fmt) if 'feature_cls' in feature_cfg: feature_cls = feature_cfg.pop('feature_cls') if isinstance(feature_cls, str): feature_cls = feature_cls.lower() if 'hook' in feature_cls: feature_cls = FeatureHookNet elif feature_cls 
== 'fx': feature_cls = FeatureGraphNet else: assert False, f'Unknown feature class {feature_cls}' model = feature_cls(model, **feature_cfg) model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg) return model
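# Typical usage inside a model file (illustrative sketch; `ResNet`, `_filter_fn` and the
# variant names are placeholders for whatever architecture the entrypoint builds):
#
#   def _create_resnet(variant, pretrained=False, **kwargs):
#       return build_model_with_cfg(
#           ResNet, variant, pretrained,
#           pretrained_filter_fn=_filter_fn,            # optional state_dict remapping
#           feature_cfg=dict(flatten_sequential=True),  # optional features_only config
#           **kwargs,
#       )
#
#   # registered entrypoints then call e.g. _create_resnet('resnet50', pretrained=True)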
pytorch-image-models/timm/models/_builder.py/0
{ "file_path": "pytorch-image-models/timm/models/_builder.py", "repo_id": "pytorch-image-models", "token_count": 7677 }
177
""" Model Registry Hacked together by / Copyright 2020 Ross Wightman """ import fnmatch import re import sys import warnings from collections import defaultdict, deque from copy import deepcopy from dataclasses import replace from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple from ._pretrained import PretrainedCfg, DefaultCfg __all__ = [ 'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs', 'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', 'get_pretrained_cfg_value', 'is_model_pretrained' ] _module_to_models: Dict[str, Set[str]] = defaultdict(set) # dict of sets to check membership of model in module _model_to_module: Dict[str, str] = {} # mapping of model names to module names _model_entrypoints: Dict[str, Callable[..., Any]] = {} # mapping of model names to architecture entrypoint fns _model_has_pretrained: Set[str] = set() # set of model names that have pretrained weight url present _model_default_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch -> default cfg objects _model_pretrained_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch.tag -> pretrained cfgs _model_with_tags: Dict[str, List[str]] = defaultdict(list) # shortcut to map each model arch to all model + tag names _module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict) _deprecated_models: Dict[str, Optional[str]] = {} def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]: model_name, *tag_list = model_name.split('.', 1) tag = tag_list[0] if tag_list else no_tag return model_name, tag def get_arch_name(model_name: str) -> str: return split_model_name_tag(model_name)[0] def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]): out = defaultdict(DefaultCfg) default_set = set() # no tag and tags ending with * are prioritized as default for k, v in cfgs.items(): if isinstance(v, dict): v = PretrainedCfg(**v) has_weights = v.has_weights model, tag = split_model_name_tag(k) is_default_set = model in default_set priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set) tag = tag.strip('*') default_cfg = out[model] if priority: default_cfg.tags.appendleft(tag) default_set.add(model) elif has_weights and not default_cfg.is_pretrained: default_cfg.tags.appendleft(tag) else: default_cfg.tags.append(tag) if has_weights: default_cfg.is_pretrained = True default_cfg.cfgs[tag] = v return out def register_model(fn: Callable[..., Any]) -> Callable[..., Any]: # lookup containing module mod = sys.modules[fn.__module__] module_name_split = fn.__module__.split('.') module_name = module_name_split[-1] if len(module_name_split) else '' # add model to __all__ in module model_name = fn.__name__ if hasattr(mod, '__all__'): mod.__all__.append(model_name) else: mod.__all__ = [model_name] # type: ignore # add entries to registry dict/sets if model_name in _model_entrypoints: warnings.warn( f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being ' 'registered conflicts with an existing name. 
Please check if this is not expected.', stacklevel=2, ) _model_entrypoints[model_name] = fn _model_to_module[model_name] = module_name _module_to_models[module_name].add(model_name) if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: # this will catch all models that have entrypoint matching cfg key, but miss any aliasing # entrypoints or non-matching combos default_cfg = mod.default_cfgs[model_name] if not isinstance(default_cfg, DefaultCfg): # new style default cfg dataclass w/ multiple entries per model-arch assert isinstance(default_cfg, dict) # old style cfg dict per model-arch pretrained_cfg = PretrainedCfg(**default_cfg) default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg}) for tag_idx, tag in enumerate(default_cfg.tags): is_default = tag_idx == 0 pretrained_cfg = default_cfg.cfgs[tag] model_name_tag = '.'.join([model_name, tag]) if tag else model_name replace_items = dict(architecture=model_name, tag=tag if tag else None) if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/': # auto-complete hub name w/ architecture.tag replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag pretrained_cfg = replace(pretrained_cfg, **replace_items) if is_default: _model_pretrained_cfgs[model_name] = pretrained_cfg if pretrained_cfg.has_weights: # add tagless entry if it's default and has weights _model_has_pretrained.add(model_name) if tag: _model_pretrained_cfgs[model_name_tag] = pretrained_cfg if pretrained_cfg.has_weights: # add model w/ tag if tag is valid _model_has_pretrained.add(model_name_tag) _model_with_tags[model_name].append(model_name_tag) else: _model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances) _model_default_cfgs[model_name] = default_cfg return fn def _deprecated_model_shim(deprecated_name: str, current_fn: Callable = None, current_tag: str = ''): def _fn(pretrained=False, **kwargs): assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.' 
current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__ warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2) pretrained_cfg = kwargs.pop('pretrained_cfg', None) return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs) return _fn def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]): mod = sys.modules[module_name] module_name_split = module_name.split('.') module_name = module_name_split[-1] if len(module_name_split) else '' for deprecated, current in deprecation_map.items(): if hasattr(mod, '__all__'): mod.__all__.append(deprecated) current_fn = None current_tag = '' if current: current_name, current_tag = split_model_name_tag(current) current_fn = getattr(mod, current_name) deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag) setattr(mod, deprecated, deprecated_entrypoint_fn) _model_entrypoints[deprecated] = deprecated_entrypoint_fn _model_to_module[deprecated] = module_name _module_to_models[module_name].add(deprecated) _deprecated_models[deprecated] = current _module_to_deprecated_models[module_name][deprecated] = current def _natural_key(string_: str) -> List[Union[int, str]]: """See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/""" return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _expand_filter(filter: str): """ expand a 'base_filter' to 'base_filter.*' if no tag portion""" filter_base, filter_tag = split_model_name_tag(filter) if not filter_tag: return ['.'.join([filter_base, '*']), filter] else: return [filter] def list_models( filter: Union[str, List[str]] = '', module: str = '', pretrained: bool = False, exclude_filters: Union[str, List[str]] = '', name_matches_cfg: bool = False, include_tags: Optional[bool] = None, ) -> List[str]: """ Return list of available model names, sorted alphabetically Args: filter - Wildcard filter string that works with fnmatch module - Limit model selection to a specific submodule (ie 'vision_transformer') pretrained - Include only models with valid pretrained weights if True exclude_filters - Wildcard filters to exclude models after including them with filter name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases) include_tags - Include pretrained tags in model names (model.tag). If None, defaults set to True when pretrained=True else False (default: None) Returns: models - The sorted list of models Example: model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet' model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module """ if filter: include_filters = filter if isinstance(filter, (tuple, list)) else [filter] else: include_filters = [] if include_tags is None: # FIXME should this be default behaviour? or default to include_tags=True? 
include_tags = pretrained all_models: Set[str] = _module_to_models[module] if module else set(_model_entrypoints.keys()) all_models = all_models - _deprecated_models.keys() # remove deprecated models from listings if include_tags: # expand model names to include names w/ pretrained tags models_with_tags: Set[str] = set() for m in all_models: models_with_tags.update(_model_with_tags[m]) all_models = models_with_tags # expand include and exclude filters to include a '.*' for proper match if no tags in filter include_filters = [ef for f in include_filters for ef in _expand_filter(f)] exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)] if include_filters: models: Set[str] = set() for f in include_filters: include_models = fnmatch.filter(all_models, f) # include these models if len(include_models): models = models.union(include_models) else: models = all_models if exclude_filters: if not isinstance(exclude_filters, (tuple, list)): exclude_filters = [exclude_filters] for xf in exclude_filters: exclude_models = fnmatch.filter(models, xf) # exclude these models if len(exclude_models): models = models.difference(exclude_models) if pretrained: models = _model_has_pretrained.intersection(models) if name_matches_cfg: models = set(_model_pretrained_cfgs).intersection(models) return sorted(models, key=_natural_key) def list_pretrained( filter: Union[str, List[str]] = '', exclude_filters: str = '', ) -> List[str]: return list_models( filter=filter, pretrained=True, exclude_filters=exclude_filters, include_tags=True, ) def get_deprecated_models(module: str = '') -> Dict[str, str]: all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models return deepcopy(all_deprecated) def is_model(model_name: str) -> bool: """ Check if a model name exists """ arch_name = get_arch_name(model_name) return arch_name in _model_entrypoints def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]: """Fetch a model entrypoint for specified model name """ arch_name = get_arch_name(model_name) if module_filter and arch_name not in _module_to_models.get(module_filter, {}): raise RuntimeError(f'Model ({model_name} not found in module {module_filter}.') return _model_entrypoints[arch_name] def list_modules() -> List[str]: """ Return list of module names that contain models / model entrypoints """ modules = _module_to_models.keys() return sorted(modules) def is_model_in_modules( model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]] ) -> bool: """Check if a model exists within a subset of modules Args: model_name - name of model to check module_names - names of modules to search in """ arch_name = get_arch_name(model_name) assert isinstance(module_names, (tuple, list, set)) return any(arch_name in _module_to_models[n] for n in module_names) def is_model_pretrained(model_name: str) -> bool: return model_name in _model_has_pretrained def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]: if model_name in _model_pretrained_cfgs: return deepcopy(_model_pretrained_cfgs[model_name]) arch_name, tag = split_model_name_tag(model_name) if arch_name in _model_default_cfgs: # if model arch exists, but the tag is wrong, error out raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.') if allow_unregistered: # if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created return None raise RuntimeError(f'Model architecture ({arch_name}) has no 
pretrained cfg registered.') def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]: """ Get a specific model default_cfg value by key. None if key doesn't exist. """ cfg = get_pretrained_cfg(model_name, allow_unregistered=False) return getattr(cfg, cfg_key, None)
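
# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the registry API surface): how
# the helpers above are typically combined once the model modules have imported
# and registered their entrypoints. The architecture name 'resnet50' is an
# assumption for the example; on a bare import of this module the registry dicts
# are empty and the lookups below simply return empty/False results.
if __name__ == '__main__':
    # wildcard filtering; pretrained=True restricts to entries with weights and
    # (by default) expands names to their 'arch.tag' form
    for name in list_models('resnet5*', pretrained=True)[:5]:
        arch, tag = split_model_name_tag(name)
        print(f'{name} -> arch={arch} tag={tag}')

    example = 'resnet50'  # assumed to be registered by timm.models.resnet
    if is_model(example):
        entrypoint = model_entrypoint(example)  # callable that constructs the nn.Module
        print(entrypoint.__module__, 'has pretrained weights:', is_model_pretrained(example))
        print('default crop_pct:', get_pretrained_cfg_value(example, 'crop_pct'))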
pytorch-image-models/timm/models/_registry.py/0
{ "file_path": "pytorch-image-models/timm/models/_registry.py", "repo_id": "pytorch-image-models", "token_count": 5428 }
178
""" EdgeNeXt Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications` - https://arxiv.org/abs/2206.10589 Original code and weights from https://github.com/mmaaz60/EdgeNeXt Modifications and additions for timm by / Copyright 2022, Ross Wightman """ import math from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, SelectAdaptivePool2d, create_conv2d, \ use_fused_attn, NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import named_apply, checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this @register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method class PositionalEncodingFourier(nn.Module): def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim def forward(self, shape: Tuple[int, int, int]): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool) y_embed = inv_mask.cumsum(1, dtype=torch.float32) x_embed = inv_mask.cumsum(2, dtype=torch.float32) eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.hidden_dim, dtype=torch.int64, device=device).to(torch.float32) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack( (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack( (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos.to(dtype)) return pos class ConvBlock(nn.Module): def __init__( self, dim, dim_out=None, kernel_size=7, stride=1, conv_bias=True, expand_ratio=4, ls_init_value=1e-6, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., ): super().__init__() dim_out = dim_out or dim self.shortcut_after_dw = stride > 1 or dim != dim_out self.conv_dw = create_conv2d( dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias) self.norm = norm_layer(dim_out) self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.conv_dw(x) if self.shortcut_after_dw: shortcut = x x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class CrossCovarianceAttn(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0. 
): super().__init__() self.num_heads = num_heads self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1) q, k, v = qkv.unbind(0) # NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v) x = x.permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class SplitTransposeBlock(nn.Module): def __init__( self, dim, num_scales=1, num_heads=8, expand_ratio=4, use_pos_emb=True, conv_bias=True, qkv_bias=True, ls_init_value=1e-6, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., attn_drop=0., proj_drop=0. ): super().__init__() width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) self.width = width self.num_scales = max(1, num_scales - 1) convs = [] for i in range(self.num_scales): convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) self.convs = nn.ModuleList(convs) self.pos_embd = None if use_pos_emb: self.pos_embd = PositionalEncodingFourier(dim=dim) self.norm_xca = norm_layer(dim) self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.xca = CrossCovarianceAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.norm = norm_layer(dim, eps=1e-6) self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): shortcut = x # scales code re-written for torchscript as per my res2net fixes -rw # NOTE torch.split(x, self.width, 1) causing issues with ONNX export spx = x.chunk(len(self.convs) + 1, dim=1) spo = [] sp = spx[0] for i, conv in enumerate(self.convs): if i > 0: sp = sp + spx[i] sp = conv(sp) spo.append(sp) spo.append(spx[-1]) x = torch.cat(spo, 1) # XCA B, C, H, W = x.shape x = x.reshape(B, C, H * W).permute(0, 2, 1) if self.pos_embd is not None: pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) x = x.reshape(B, H, W, C) # Inverted Bottleneck x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class EdgeNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, stride=2, depth=2, num_global_blocks=1, num_heads=4, scales=2, kernel_size=7, expand_ratio=4, use_pos_emb=False, downsample_block=False, conv_bias=True, ls_init_value=1.0, drop_path_rates=None, norm_layer=LayerNorm2d, norm_layer_cl=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU ): super().__init__() self.grad_checkpointing = False if downsample_block or stride == 1: self.downsample = nn.Identity() else: self.downsample = nn.Sequential( norm_layer(in_chs), nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias) ) in_chs = out_chs stage_blocks = [] for i in range(depth): if i < depth - num_global_blocks: stage_blocks.append( ConvBlock( dim=in_chs, dim_out=out_chs, stride=stride if downsample_block and i == 0 else 1, conv_bias=conv_bias, kernel_size=kernel_size, expand_ratio=expand_ratio, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) else: stage_blocks.append( SplitTransposeBlock( dim=in_chs, num_scales=scales, num_heads=num_heads, expand_ratio=expand_ratio, use_pos_emb=use_pos_emb, conv_bias=conv_bias, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EdgeNeXt(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', dims=(24, 48, 88, 168), depths=(3, 3, 9, 3), global_block_counts=(0, 1, 1, 1), kernel_sizes=(3, 5, 7, 9), heads=(8, 8, 8, 8), d2_scales=(2, 2, 3, 4), use_pos_emb=(False, True, False, False), ls_init_value=1e-6, head_init_scale=1., expand_ratio=4, downsample_block=False, conv_bias=True, stem_type='patch', head_norm_first=False, act_layer=nn.GELU, drop_path_rate=0., drop_rate=0., ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.drop_rate = drop_rate norm_layer = partial(LayerNorm2d, eps=1e-6) norm_layer_cl = partial(nn.LayerNorm, eps=1e-6) self.feature_info = [] assert stem_type in ('patch', 'overlap') if stem_type == 'patch': self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias), norm_layer(dims[0]), ) else: self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), norm_layer(dims[0]), ) curr_stride = 4 stages = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, 
sum(depths)).split(depths)] in_chs = dims[0] for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 # FIXME support dilation / output_stride curr_stride *= stride stages.append(EdgeNeXtStage( in_chs=in_chs, out_chs=dims[i], stride=stride, depth=depths[i], num_global_blocks=global_block_counts[i], num_heads=heads[i], drop_path_rates=dp_rates[i], scales=d2_scales[i], expand_ratio=expand_ratio, kernel_size=kernel_sizes[i], use_pos_emb=use_pos_emb[i], ls_init_value=ls_init_value, downsample_block=downsample_block, conv_bias=conv_bias, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, act_layer=act_layer, )) # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 in_chs = dims[i] self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = dims[-1] if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes=0, global_pool=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_tf_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint # models were released as train checkpoints... 
:/ if 'model_ema' in state_dict: state_dict = state_dict['model_ema'] elif 'model' in state_dict: state_dict = state_dict['model'] elif 'state_dict' in state_dict: state_dict = state_dict['state_dict'] out_dict = {} import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_edgenext(variant, pretrained=False, **kwargs): model = build_model_with_cfg( EdgeNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'edgenext_xx_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_x_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_small.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.in21k_ft_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_small_rw.sw_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 320, 320), test_crop_pct=1.0, ), }) @register_model def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt: # 1.33M & 260.58M @ 256 resolution # 71.23% Top-1 accuracy # No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=51.66 versus 47.67 for MobileViT_XXS # For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt: # 2.34M & 538.0M @ 256 resolution # 75.00% Top-1 accuracy # No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=31.61 versus 28.49 for MobileViT_XS # For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt: # 5.59M & 1260.59M @ 256 resolution # 79.43% Top-1 accuracy # AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=20.47 versus 18.86 for MobileViT_S # For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 
1785.92 for MobileViT_S model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304)) return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt: # 18.51M & 3840.93M @ 256 resolution # 82.5% (normal) 83.7% (USI) Top-1 accuracy # AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=xx.xx versus xx.xx for MobileViT_S # For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584]) return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict( depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), downsample_block=True, conv_bias=False, stem_type='overlap') return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
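
# ----------------------------------------------------------------------------
# Usage sketch (illustrative): instantiating one of the EdgeNeXt variants
# registered above and running a forward pass. pretrained=False keeps the
# example self-contained (no weight download); the 256x256 input matches the
# default cfg resolution.
if __name__ == '__main__':
    model = edgenext_small(pretrained=False)
    model.eval()
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        logits = model(x)                    # (1, 1000) classification logits
        feats = model.forward_features(x)    # (1, 304, 8, 8) stride-32 feature map
    print(logits.shape, feats.shape)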
pytorch-image-models/timm/models/edgenext.py/0
{ "file_path": "pytorch-image-models/timm/models/edgenext.py", "repo_id": "pytorch-image-models", "token_count": 11040 }
179
""" HRNet Copied from https://github.com/HRNet/HRNet-Image-Classification Original header: Copyright (c) Microsoft Licensed under the MIT License. Written by Bin Xiao ([email protected]) Modified by Ke Sun ([email protected]) """ import logging from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._features import FeatureInfo from ._registry import register_model, generate_default_cfgs from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE __all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this _BN_MOMENTUM = 0.1 _logger = logging.getLogger(__name__) cfg_cls = dict( hrnet_w18_small=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(1,), num_channels=(32,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(16, 32), fuse_method='SUM' ), stage3=dict( num_modules=1, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(16, 32, 64), fuse_method='SUM' ), stage4=dict( num_modules=1, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(16, 32, 64, 128), fuse_method='SUM', ), ), hrnet_w18_small_v2=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(2,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=3, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=2, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w18=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w30=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(30, 60), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(30, 60, 120), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(30, 60, 120, 240), fuse_method='SUM', ), ), hrnet_w32=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(32, 64), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 
4), num_channels=(32, 64, 128), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256), fuse_method='SUM', ), ), hrnet_w40=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(40, 80), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(40, 80, 160), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(40, 80, 160, 320), fuse_method='SUM', ), ), hrnet_w44=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(44, 88), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(44, 88, 176), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(44, 88, 176, 352), fuse_method='SUM', ), ), hrnet_w48=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(48, 96), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(48, 96, 192), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(48, 96, 192, 384), fuse_method='SUM', ), ), hrnet_w64=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(64, 128), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(64, 128, 256), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(64, 128, 256, 512), fuse_method='SUM', ), ) ) class HighResolutionModule(nn.Module): def __init__( self, num_branches, block_types, num_blocks, num_in_chs, num_channels, fuse_method, multi_scale_output=True, ): super(HighResolutionModule, self).__init__() self._check_branches( num_branches, block_types, num_blocks, num_in_chs, num_channels, ) self.num_in_chs = num_in_chs self.fuse_method = fuse_method self.num_branches = num_branches self.multi_scale_output = multi_scale_output self.branches = self._make_branches( num_branches, block_types, num_blocks, num_channels, ) self.fuse_layers = self._make_fuse_layers() self.fuse_act = nn.ReLU(False) def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels): error_msg = '' if num_branches != len(num_blocks): error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks)) elif num_branches != len(num_channels): error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels)) elif num_branches != len(num_in_chs): error_msg = 'num_branches({}) <> num_in_chs({})'.format(num_branches, 
len(num_in_chs)) if error_msg: _logger.error(error_msg) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion: downsample = nn.Sequential( nn.Conv2d( self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)] self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion for i in range(1, num_blocks[branch_index]): layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index])) return nn.Sequential(*layers) def _make_branches(self, num_branches, block_type, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return nn.Identity() num_branches = self.num_branches num_in_chs = self.num_in_chs fuse_layers = [] for i in range(num_branches if self.multi_scale_output else 1): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False), nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM), nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) elif j == i: fuse_layer.append(nn.Identity()) else: conv3x3s = [] for k in range(i - j): if k == i - j - 1: num_out_chs_conv3x3 = num_in_chs[i] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM) )) else: num_out_chs_conv3x3 = num_in_chs[j] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM), nn.ReLU(False) )) fuse_layer.append(nn.Sequential(*conv3x3s)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def get_num_in_chs(self): return self.num_in_chs def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if self.num_branches == 1: return [self.branches[0](x[0])] for i, branch in enumerate(self.branches): x[i] = branch(x[i]) x_fuse = [] for i, fuse_outer in enumerate(self.fuse_layers): y = None for j, f in enumerate(fuse_outer): if y is None: y = f(x[j]) else: y = y + f(x[j]) x_fuse.append(self.fuse_act(y)) return x_fuse class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x @torch.jit.interface class ModuleInterface(torch.nn.Module): def forward(self, input: torch.Tensor) -> torch.Tensor: # `input` has a same name in Sequential forward pass block_types_dict = { 'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck } class HighResolutionNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, head='classification', **kwargs, ): 
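        # head selects what gets built on top of the HRNet stages: 'classification'
        # adds the incre/downsample/final-conv modules plus a classifier, 'incre'
        # keeps only the channel-increase modules (used by the feature wrapper
        # below with feature_location='incre'), and '' leaves raw stage outputs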
super(HighResolutionNet, self).__init__() self.num_classes = num_classes assert output_stride == 32 # FIXME support dilation cfg.update(**kwargs) stem_width = cfg['stem_width'] self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) self.act2 = nn.ReLU(inplace=True) self.stage1_cfg = cfg['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = block_types_dict[self.stage1_cfg['block_type']] num_blocks = self.stage1_cfg['num_blocks'][0] self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks) stage1_out_channel = block_type.expansion * num_channels self.stage2_cfg = cfg['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = block_types_dict[self.stage2_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) self.stage3_cfg = cfg['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = block_types_dict[self.stage3_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) self.stage4_cfg = cfg['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = block_types_dict[self.stage4_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) self.head = head self.head_channels = None # set if _make_head called head_conv_bias = cfg.pop('head_conv_bias', True) if head == 'classification': # Classification Head self.num_features = 2048 self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head( pre_stage_channels, conv_bias=head_conv_bias, ) self.global_pool, self.head_drop, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) else: if head == 'incre': self.num_features = 2048 self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True) else: self.num_features = 256 self.incre_modules = None self.global_pool = nn.Identity() self.head_drop = nn.Identity() self.classifier = nn.Identity() curr_stride = 2 # module names aren't actually valid here, hook or FeatureNet based extraction would not work self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] for i, c in enumerate(self.head_channels if self.head_channels else num_channels): curr_stride *= 2 c = c * 4 if self.head_channels else c # head block_type expansion factor of 4 self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] self.init_weights() def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True): head_block_type = Bottleneck self.head_channels = [32, 64, 128, 256] # Increasing the #channels on each resolution # from C, 2C, 4C, 8C to 128, 256, 512, 1024 incre_modules = [] for 
i, channels in enumerate(pre_stage_channels): incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1)) incre_modules = nn.ModuleList(incre_modules) if incre_only: return incre_modules, None, None # downsampling modules downsamp_modules = [] for i in range(len(pre_stage_channels) - 1): in_channels = self.head_channels[i] * head_block_type.expansion out_channels = self.head_channels[i + 1] * head_block_type.expansion downsamp_module = nn.Sequential( nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) downsamp_modules.append(downsamp_module) downsamp_modules = nn.ModuleList(downsamp_modules) final_layer = nn.Sequential( nn.Conv2d( in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features, kernel_size=1, stride=1, padding=0, bias=conv_bias), nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) return incre_modules, downsamp_modules, final_layer def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append(nn.Sequential( nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) else: transition_layers.append(nn.Identity()) else: conv3x3s = [] for j in range(i + 1 - num_branches_pre): _in_chs = num_channels_pre_layer[-1] _out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs conv3x3s.append(nn.Sequential( nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False), nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv3x3s)) return nn.ModuleList(transition_layers) def _make_layer(self, block_type, inplanes, planes, block_types, stride=1): downsample = None if stride != 1 or inplanes != planes * block_type.expansion: downsample = nn.Sequential( nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(inplanes, planes, stride, downsample)] inplanes = planes * block_type.expansion for i in range(1, block_types): layers.append(block_type(inplanes, planes)) return nn.Sequential(*layers) def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block_type = block_types_dict[layer_config['block_type']] fuse_method = layer_config['fuse_method'] modules = [] for i in range(num_modules): # multi_scale_output is only used last module reset_multi_scale_output = multi_scale_output or i < num_modules - 1 modules.append(HighResolutionModule( num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output) ) num_in_chs = modules[-1].get_num_in_chs() return SequentialList(*modules), num_in_chs @torch.jit.ignore def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', 
nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv[12]|bn[12]', block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [ (r'^layer(\d+)\.(\d+)', None), (r'^stage(\d+)\.(\d+)', None), (r'^transition(\d+)', (99999,)), ], ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "gradient checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def stages(self, x) -> List[torch.Tensor]: x = self.layer1(x) xl = [t(x) for i, t in enumerate(self.transition1)] yl = self.stage2(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] yl = self.stage3(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] yl = self.stage4(xl) return yl def forward_features(self, x): # Stem x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) # Stages yl = self.stages(x) if self.incre_modules is None or self.downsamp_modules is None: return yl y = None for i, incre in enumerate(self.incre_modules): if y is None: y = incre(yl[i]) else: down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing y = incre(yl[i]) + down.forward(y) y = self.final_layer(y) return y def forward_head(self, x, pre_logits: bool = False): # Classification Head x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classifier(x) def forward(self, x): y = self.forward_features(x) x = self.forward_head(y) return x class HighResolutionNetFeatures(HighResolutionNet): """HighResolutionNet feature extraction The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. It would be more complicated to use the FeatureNet helpers. The `feature_location=incre` allows grabbing increased channel count features using part of the classification head. If `feature_location=''` the default HRNet features are returned. First stem conv is used for stride 2 features. 
""" def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, feature_location='incre', out_indices=(0, 1, 2, 3, 4), **kwargs, ): assert feature_location in ('incre', '') super(HighResolutionNetFeatures, self).__init__( cfg, in_chans=in_chans, num_classes=num_classes, output_stride=output_stride, global_pool=global_pool, drop_rate=drop_rate, head=feature_location, **kwargs, ) self.feature_info = FeatureInfo(self.feature_info, out_indices) self._out_idx = {f['index'] for f in self.feature_info.get_dicts()} def forward_features(self, x): assert False, 'Not supported' def forward(self, x) -> List[torch.tensor]: out = [] x = self.conv1(x) x = self.bn1(x) x = self.act1(x) if 0 in self._out_idx: out.append(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.stages(x) if self.incre_modules is not None: x = [incre(f) for f, incre in zip(x, self.incre_modules)] for i, f in enumerate(x): if i + 1 in self._out_idx: out.append(f) return out def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs): model_cls = HighResolutionNet features_only = False kwargs_filter = None if model_kwargs.pop('features_only', False): model_cls = HighResolutionNetFeatures kwargs_filter = ('num_classes', 'global_pool') features_only = True cfg_variant = cfg_variant or variant model = build_model_with_cfg( model_cls, variant, pretrained, model_cfg=cfg_cls[cfg_variant], pretrained_strict=not features_only, kwargs_filter=kwargs_filter, **model_kwargs, ) if features_only: model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg) model.default_cfg = model.pretrained_cfg # backwards compat return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18.ms_aug_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, ), 'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), 'hrnet_w48_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), }) @register_model def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) @register_model def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) @register_model def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18', pretrained, **kwargs) @register_model def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w30', pretrained, **kwargs) @register_model def hrnet_w32(pretrained=False, **kwargs) -> 
HighResolutionNet: return _create_hrnet('hrnet_w32', pretrained, **kwargs) @register_model def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w40', pretrained, **kwargs) @register_model def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w44', pretrained, **kwargs) @register_model def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w48', pretrained, **kwargs) @register_model def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w64', pretrained, **kwargs) @register_model def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs) @register_model def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
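
# ----------------------------------------------------------------------------
# Usage sketch (illustrative): the same HRNet config can back either the
# classification network or the multi-scale feature extractor; passing
# features_only=True routes _create_hrnet through HighResolutionNetFeatures.
# pretrained=False avoids any weight download; shapes noted for a 224x224 input.
if __name__ == '__main__':
    x = torch.randn(1, 3, 224, 224)

    clf = hrnet_w18_small(pretrained=False)
    clf.eval()
    with torch.no_grad():
        print(clf(x).shape)  # torch.Size([1, 1000])

    feat_model = hrnet_w18_small(pretrained=False, features_only=True)
    feat_model.eval()
    with torch.no_grad():
        for f in feat_model(x):  # 5 maps at strides 2, 4, 8, 16, 32 ('incre' head channels)
            print(f.shape)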
pytorch-image-models/timm/models/hrnet.py/0
{ "file_path": "pytorch-image-models/timm/models/hrnet.py", "repo_id": "pytorch-image-models", "token_count": 17584 }
180
""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 Paper: `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets Status: * These models are a work in progress, experiments ongoing. * Pretrained weights for two models so far, more to come. * Model details updated to closer match official JAX code now that it's released * NF-ResNet, NF-RegNet-B, and NFNet-F models supported Hacked together by / copyright Ross Wightman, 2021. """ from collections import OrderedDict from dataclasses import dataclass, replace from functools import partial from typing import Callable, Tuple, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, \ get_act_layer, get_act_fn, get_attn, make_divisible from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NormFreeNet', 'NfCfg'] # model_registry will add each entrypoint fn to this @dataclass class NfCfg: depths: Tuple[int, int, int, int] channels: Tuple[int, int, int, int] alpha: float = 0.2 stem_type: str = '3x3' stem_chs: Optional[int] = None group_size: Optional[int] = None attn_layer: Optional[str] = None attn_kwargs: dict = None attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used width_factor: float = 1.0 bottle_ratio: float = 0.5 num_features: int = 0 # num out_channels for final conv, no final_conv if 0 ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models gamma_in_act: bool = False same_padding: bool = False std_conv_eps: float = 1e-5 skipinit: bool = False # disabled by default, non-trivial performance impact zero_init_fc: bool = False act_layer: str = 'silu' class GammaAct(nn.Module): def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): super().__init__() self.act_fn = get_act_fn(act_type) self.gamma = gamma self.inplace = inplace def forward(self, x): return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) def act_with_gamma(act_type, gamma: float = 1.): def _create(inplace=False): return GammaAct(act_type, gamma=gamma, inplace=inplace) return _create class DownsampleAvg(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, conv_layer: Callable = ScaledStdConv2d, ): """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" super(DownsampleAvg, self).__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = conv_layer(in_chs, out_chs, 1, stride=1) def forward(self, x): return self.conv(self.pool(x)) @register_notrace_module # reason: mul_ causes FX to drop a relevant node. 
https://github.com/pytorch/pytorch/issues/68301 class NormFreeBlock(nn.Module): """Normalization-Free pre-activation block. """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, alpha: float = 1.0, beta: float = 1.0, bottle_ratio: float = 0.25, group_size: Optional[int] = None, ch_div: int = 1, reg: bool = True, extra_conv: bool = False, skipinit: bool = False, attn_layer: Optional[Callable] = None, attn_gain: bool = 2.0, act_layer: Optional[Callable] = None, conv_layer: Callable = ScaledStdConv2d, drop_path_rate: float = 0., ): super().__init__() first_dilation = first_dilation or dilation out_chs = out_chs or in_chs # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) groups = 1 if not group_size else mid_chs // group_size if group_size and group_size % ch_div == 0: mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error self.alpha = alpha self.beta = beta self.attn_gain = attn_gain if in_chs != out_chs or stride != 1 or dilation != first_dilation: self.downsample = DownsampleAvg( in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer, ) else: self.downsample = None self.act1 = act_layer() self.conv1 = conv_layer(in_chs, mid_chs, 1) self.act2 = act_layer(inplace=True) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) if extra_conv: self.act2b = act_layer(inplace=True) self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) else: self.act2b = None self.conv2b = None if reg and attn_layer is not None: self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 else: self.attn = None self.act3 = act_layer() self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) 
if not reg and attn_layer is not None: self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 else: self.attn_last = None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None def forward(self, x): out = self.act1(x) * self.beta # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(out) # residual branch out = self.conv1(out) out = self.conv2(self.act2(out)) if self.conv2b is not None: out = self.conv2b(self.act2b(out)) if self.attn is not None: out = self.attn_gain * self.attn(out) out = self.conv3(self.act3(out)) if self.attn_last is not None: out = self.attn_gain * self.attn_last(out) out = self.drop_path(out) if self.skipinit_gain is not None: out.mul_(self.skipinit_gain) out = out * self.alpha + shortcut return out def create_stem( in_chs: int, out_chs: int, stem_type: str = '', conv_layer: Optional[Callable] = None, act_layer: Optional[Callable] = None, preact_feature: bool = True, ): stem_stride = 2 stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') stem = OrderedDict() assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') if 'deep' in stem_type: if 'quad' in stem_type: # 4 deep conv stack as in NFNet-F models assert not 'pool' in stem_type stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) strides = (2, 1, 1, 2) stem_stride = 4 stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') else: if 'tiered' in stem_type: stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py else: stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets strides = (2, 1, 1) stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') last_idx = len(stem_chs) - 1 for i, (c, s) in enumerate(zip(stem_chs, strides)): stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) if i != last_idx: stem[f'act{i + 2}'] = act_layer(inplace=True) in_chs = c elif '3x3' in stem_type: # 3x3 stem conv as in RegNet stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) else: # 7x7 stem conv as in ResNet stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) if 'pool' in stem_type: stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) stem_stride = 4 return nn.Sequential(stem), stem_stride, stem_feature # from https://github.com/deepmind/deepmind-research/tree/master/nfnets _nonlin_gamma = dict( identity=1.0, celu=1.270926833152771, elu=1.2716004848480225, gelu=1.7015043497085571, leaky_relu=1.70590341091156, log_sigmoid=1.9193484783172607, log_softmax=1.0002083778381348, relu=1.7139588594436646, relu6=1.7131484746932983, selu=1.0008515119552612, sigmoid=4.803835391998291, silu=1.7881293296813965, softsign=2.338853120803833, softplus=1.9203323125839233, tanh=1.5939117670059204, ) class NormFreeNet(nn.Module): """ Normalization-Free Network As described in : `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 and `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and the (preact) ResNet models described earlier in the paper. 
There are a few differences: * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), this changes channel dim and param counts slightly from the paper models * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but apply it in each activation. This is slightly slower, numerically different, but matches official impl. * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput for what it is/does. Approx 8-10% throughput loss. """ def __init__( self, cfg: NfCfg, num_classes: int = 1000, in_chans: int = 3, global_pool: str = 'avg', output_stride: int = 32, drop_rate: float = 0., drop_path_rate: float = 0., **kwargs, ): """ Args: cfg: Model architecture configuration. num_classes: Number of classifier classes. in_chans: Number of input channels. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). drop_rate: Dropout rate. drop_path_rate: Stochastic depth drop-path rate. **kwargs: Extra kwargs overlayed onto cfg. """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d if cfg.gamma_in_act: act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) else: act_layer = get_act_layer(cfg.act_layer) conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) self.stem, stem_stride, stem_feat = create_stem( in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer, ) self.feature_info = [stem_feat] drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] prev_chs = stem_chs net_stride = stem_stride dilation = 1 expected_var = 1.0 stages = [] for stage_idx, stage_depth in enumerate(cfg.depths): stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for block_idx in range(cfg.depths[stage_idx]): first_block = block_idx == 0 and stage_idx == 0 out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) blocks += [NormFreeBlock( in_chs=prev_chs, out_chs=out_chs, alpha=cfg.alpha, beta=1. / expected_var ** 0.5, stride=stride if block_idx == 0 else 1, dilation=dilation, first_dilation=first_dilation, group_size=cfg.group_size, bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, ch_div=cfg.ch_div, reg=cfg.reg, extra_conv=cfg.extra_conv, skipinit=cfg.skipinit, attn_layer=attn_layer, attn_gain=cfg.attn_gain, act_layer=act_layer, conv_layer=conv_layer, drop_path_rate=drop_path_rates[stage_idx][block_idx], )] if block_idx == 0: expected_var = 1. 
# expected var is reset after first block of each stage expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance first_dilation = dilation prev_chs = out_chs self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] stages += [nn.Sequential(*blocks)] self.stages = nn.Sequential(*stages) if cfg.num_features: # The paper NFRegNet models have an EfficientNet-like final head convolution. self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) self.final_conv = conv_layer(prev_chs, self.num_features, 1) self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') else: self.num_features = prev_chs self.final_conv = nn.Identity() self.final_act = act_layer(inplace=cfg.num_features > 0) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) for n, m in self.named_modules(): if 'fc' in n and isinstance(m, nn.Linear): if cfg.zero_init_fc: nn.init.zeros_(m.weight) else: nn.init.normal_(m.weight, 0., .01) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), (r'^final_conv', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) x = self.final_act(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _nfres_cfg( depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None, ): attn_kwargs = attn_kwargs or {} cfg = NfCfg( depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs, ) return cfg def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): num_features = 1280 * channels[-1] // 440 attn_kwargs = dict(rd_ratio=0.5) cfg = NfCfg( depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs, ) return cfg def _nfnet_cfg( depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., act_layer='gelu', attn_layer='se', attn_kwargs=None, ): num_features = int(channels[-1] * feat_mult) attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) cfg = NfCfg( depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs, ) return cfg def _dm_nfnet_cfg( depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True, ): cfg = 
NfCfg( depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5), ) return cfg model_cfgs = dict( # NFNet-F models w/ GELU compatible with DeepMind weights dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), # NFNet-F models w/ GELU nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), # Experimental 'light' versions of NFNet-F that are little leaner, w/ SiLU act nfnet_l0=_nfnet_cfg( depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), eca_nfnet_l0=_nfnet_cfg( depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l1=_nfnet_cfg( depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l2=_nfnet_cfg( depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l3=_nfnet_cfg( depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), # EffNet influenced RegNet defs. # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8. 
nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), # ResNet (preact, D style deep stem/avg down) defs nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), ) def _create_normfreenet(variant, pretrained=False, **kwargs): model_cfg = model_cfgs[variant] feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg( NormFreeNet, variant, pretrained, model_cfg=model_cfg, feature_cfg=feature_cfg, **kwargs, ) def _dcfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'dm_nfnet_f0.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9, crop_mode='squash'), 'dm_nfnet_f1.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'), 'dm_nfnet_f2.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'), 'dm_nfnet_f3.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'), 'dm_nfnet_f4.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'), 'dm_nfnet_f5.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'), 'dm_nfnet_f6.dm_in1k': _dcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'), 'nfnet_f0': _dcfg( url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), 'nfnet_f1': _dcfg( url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), 'nfnet_f2': _dcfg( url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), 'nfnet_f3': _dcfg( url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), 'nfnet_f4': _dcfg( url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), 'nfnet_f5': _dcfg( url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), 'nfnet_f6': _dcfg( url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), 'nfnet_f7': _dcfg( url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), 'nfnet_l0.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l0.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l1.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'eca_nfnet_l2.ra3_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'eca_nfnet_l3': _dcfg( url='', pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0), 'nf_regnet_b0': _dcfg( url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), 'nf_regnet_b1.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec 'nf_regnet_b2': _dcfg( url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), 'nf_regnet_b3': _dcfg( url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), 'nf_regnet_b4': _dcfg( url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), 'nf_regnet_b5': _dcfg( url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), 'nf_resnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_resnet50.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), 'nf_resnet101': 
_dcfg(url='', first_conv='stem.conv'), 'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet101': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv'), }) @register_model def dm_nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F0 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F1 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F2 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F3 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F4 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F5 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F6 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F0 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) @register_model def nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F1 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) @register_model def nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F2 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) @register_model def nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F3 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f3', 
pretrained=pretrained, **kwargs) @register_model def nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F4 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) @register_model def nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F5 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) @register_model def nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F6 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f7(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F7 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) @register_model def nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-L0b w/ SiLU My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio """ return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L0 w/ SiLU My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l1(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L1 w/ SiLU My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l2(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L2 w/ SiLU My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l3(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L3 w/ SiLU My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b0(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B0 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b1(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B1 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b2(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B2 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b3(pretrained=False, **kwargs) -> 
NormFreeNet: """ Normalization-Free RegNet-B3 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b4(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B4 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b5(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B5 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) @register_model def nf_resnet26(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ResNet-26 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) @register_model def nf_resnet50(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ResNet-50 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) @register_model def nf_resnet101(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ResNet-101 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) @register_model def nf_seresnet26(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free SE-ResNet26 """ return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) @register_model def nf_seresnet50(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free SE-ResNet50 """ return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) @register_model def nf_seresnet101(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free SE-ResNet101 """ return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet26(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ECA-ResNet26 """ return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet50(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ECA-ResNet50 """ return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet101(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ECA-ResNet101 """ return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs)
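The registry entries above are what the `timm` model factory resolves. As a minimal usage sketch (assuming the surrounding `timm` package is installed), any of the variants defined in this file can be instantiated and run like this:

```python
import torch
import timm

# Build one of the variants registered above. Pretrained weights are only
# available for entries whose default_cfg carries a URL / hf_hub id
# (e.g. dm_nfnet_f0, eca_nfnet_l0, nf_resnet50); the rest start untrained.
model = timm.create_model('dm_nfnet_f0', pretrained=False)
model.eval()

# dm_nfnet_f0 defaults to 192x192 train crops and 256x256 test crops
# (see its default_cfg above); the body is fully convolutional.
x = torch.randn(1, 3, 192, 192)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])
```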
pytorch-image-models/timm/models/nfnet.py/0
{ "file_path": "pytorch-image-models/timm/models/nfnet.py", "repo_id": "pytorch-image-models", "token_count": 19131 }
181
""" Selective Kernel Networks (ResNet base) Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer to the original paper with some modifications of my own to better balance param count vs accuracy. Hacked together by / Copyright 2020 Ross Wightman """ import math from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectiveKernel, ConvNormAct, create_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class SelectiveKernelBasic(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(SelectiveKernelBasic, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock doest not support changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = SelectiveKernel( inplanes, first_planes, stride=stride, dilation=first_dilation, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv2 = ConvNormAct( first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv2.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x class SelectiveKernelBottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(SelectiveKernelBottleneck, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) self.conv2 = SelectiveKernel( first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv3.bn, 'weight', None) is not None: 
nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x def _create_skresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( ResNet, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet50.untrained': _cfg(), 'skresnet50d.untrained': _cfg( first_conv='conv1.0'), 'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def skresnet18(pretrained=False, **kwargs) -> ResNet: """Constructs a Selective Kernel ResNet-18 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet18', pretrained, **model_args) @register_model def skresnet34(pretrained=False, **kwargs) -> ResNet: """Constructs a Selective Kernel ResNet-34 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet34', pretrained, **model_args) @register_model def skresnet50(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNet-50 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50', pretrained, **model_args) @register_model def skresnet50d(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNet-50-D model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50d', pretrained, **model_args) @register_model def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNeXt50-32x4d model. 
This should be equivalent to the SKNet-50 model in the Select Kernel Paper """ sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
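As with the NFNet file above, the registered names can be built through the `timm` factory. A small sketch, assuming `timm` is installed:

```python
import torch
import timm

# skresnet18 / skresnet34 / skresnext50_32x4d carry 'ra_in1k' weights per the
# default_cfgs above; skresnet50 / skresnet50d are registered as untrained.
model = timm.create_model('skresnet18', pretrained=False)
model.eval()

with torch.no_grad():
    out = model(torch.randn(2, 3, 224, 224))
print(out.shape)  # torch.Size([2, 1000])
```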
pytorch-image-models/timm/models/sknet.py/0
{ "file_path": "pytorch-image-models/timm/models/sknet.py", "repo_id": "pytorch-image-models", "token_count": 3801 }
182
""" Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) @author: tstandley Adapted by cadene Creates an Xception Model as defined in: Francois Chollet Xception: Deep Learning with Depthwise Separable Convolutions https://arxiv.org/pdf/1610.02357.pdf This weights ported from the Keras implementation. Achieves the following performance on the validation set: Loss:0.9173 Prec@1:78.892 Prec@5:94.292 REMEMBER to set your image size to 3x299x299 for both test and validation normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 """ import torch.jit import torch.nn as nn import torch.nn.functional as F from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Xception'] class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): super(SeparableConv2d, self).__init__() self.conv1 = nn.Conv2d( in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) def forward(self, x): x = self.conv1(x) x = self.pointwise(x) return x class Block(nn.Module): def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): super(Block, self).__init__() if out_channels != in_channels or strides != 1: self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) self.skipbn = nn.BatchNorm2d(out_channels) else: self.skip = None rep = [] for i in range(reps): if grow_first: inc = in_channels if i == 0 else out_channels outc = out_channels else: inc = in_channels outc = in_channels if i < (reps - 1) else out_channels rep.append(nn.ReLU(inplace=True)) rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) rep.append(nn.BatchNorm2d(outc)) if not start_with_relu: rep = rep[1:] else: rep[0] = nn.ReLU(inplace=False) if strides != 1: rep.append(nn.MaxPool2d(3, strides, 1)) self.rep = nn.Sequential(*rep) def forward(self, inp): x = self.rep(inp) if self.skip is not None: skip = self.skip(inp) skip = self.skipbn(skip) else: skip = inp x += skip return x class Xception(nn.Module): """ Xception optimized for the ImageNet dataset, as specified in https://arxiv.org/pdf/1610.02357.pdf """ def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): """ Constructor Args: num_classes: number of classes """ super(Xception, self).__init__() self.drop_rate = drop_rate self.global_pool = global_pool self.num_classes = num_classes self.num_features = 2048 self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32, 64, 3, bias=False) self.bn2 = nn.BatchNorm2d(64) self.act2 = nn.ReLU(inplace=True) self.block1 = Block(64, 128, 2, 2, start_with_relu=False) self.block2 = Block(128, 256, 2, 2) self.block3 = Block(256, 728, 2, 2) self.block4 = Block(728, 728, 3, 1) self.block5 = Block(728, 728, 3, 1) self.block6 = Block(728, 728, 3, 1) self.block7 = Block(728, 728, 3, 1) self.block8 = Block(728, 728, 3, 1) self.block9 = Block(728, 728, 3, 1) self.block10 = Block(728, 728, 3, 1) self.block11 = Block(728, 728, 3, 1) self.block12 = Block(728, 1024, 2, 2, grow_first=False) self.conv3 = SeparableConv2d(1024, 
1536, 3, 1, 1)
        self.bn3 = nn.BatchNorm2d(1536)
        self.act3 = nn.ReLU(inplace=True)

        self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1)
        self.bn4 = nn.BatchNorm2d(self.num_features)
        self.act4 = nn.ReLU(inplace=True)
        self.feature_info = [
            dict(num_chs=64, reduction=2, module='act2'),
            dict(num_chs=128, reduction=4, module='block2.rep.0'),
            dict(num_chs=256, reduction=8, module='block3.rep.0'),
            dict(num_chs=728, reduction=16, module='block12.rep.0'),
            dict(num_chs=2048, reduction=32, module='act4'),
        ]

        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

        # #------- init weights --------
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        return dict(
            stem=r'^conv[12]|bn[12]',
            blocks=[
                (r'^block(\d+)', None),
                (r'^conv[34]|bn[34]', (99,)),
            ],
        )

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        assert not enable, "gradient checkpointing not supported"

    @torch.jit.ignore
    def get_classifier(self):
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        self.num_classes = num_classes
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act2(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.act3(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.act4(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        if self.drop_rate:
            # F.dropout is not in-place; assign the result so dropout actually takes effect
            x = F.dropout(x, self.drop_rate, training=self.training)
        return x if pre_logits else self.fc(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _xception(variant, pretrained=False, **kwargs):
    return build_model_with_cfg(
        Xception, variant, pretrained,
        feature_cfg=dict(feature_cls='hook'),
        **kwargs)


default_cfgs = generate_default_cfgs({
    'legacy_xception.tf_in1k': {
        'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth',
        'input_size': (3, 299, 299),
        'pool_size': (10, 10),
        'crop_pct': 0.8975,
        'interpolation': 'bicubic',
        'mean': (0.5, 0.5, 0.5),
        'std': (0.5, 0.5, 0.5),
        'num_classes': 1000,
        'first_conv': 'conv1',
        'classifier': 'fc'
        # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
    }
})


@register_model
def legacy_xception(pretrained=False, **kwargs) -> Xception:
    return _xception('legacy_xception', pretrained=pretrained, **kwargs)


register_model_deprecations(__name__, {
    'xception': 'legacy_xception',
})
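The module docstring above spells out the expected preprocessing (resize to 333, center-crop to 299x299, mean/std of 0.5). A sketch of that validation pipeline, assuming `timm` and `torchvision` are installed:

```python
import torch
import timm
from torchvision import transforms

# Validation preprocessing described in the module docstring above:
# resize so the shorter side is 333, center-crop to 299x299, scale to [0, 1],
# then normalize with mean=std=0.5. `preprocess` expects a PIL image.
preprocess = transforms.Compose([
    transforms.Resize(333),
    transforms.CenterCrop(299),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

model = timm.create_model('legacy_xception', pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 299, 299))  # stand-in for a preprocessed batch
print(logits.shape)  # torch.Size([1, 1000])
```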
pytorch-image-models/timm/models/xception.py/0
{ "file_path": "pytorch-image-models/timm/models/xception.py", "repo_id": "pytorch-image-models", "token_count": 3973 }
183
""" NAdamW Optimizer Based on simplified algorithm in https://github.com/mlcommons/algorithmic-efficiency/tree/main/baselines/nadamw Added multi-tensor (foreach) path. """ import math from typing import List, Optional import torch from torch import Tensor # Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py. class NAdamW(torch.optim.Optimizer): r"""Implements NAdamW algorithm. See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of the NAdam algorithm (there is also a comment in the code which highlights the only difference of NAdamW and AdamW). For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay coefficient (default: 1e-2) .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, maximize: bool = False, foreach: Optional[bool] = None, capturable: bool = False, ): if not 0.0 <= lr: raise ValueError(f'Invalid learning rate: {lr}') if not 0.0 <= eps: raise ValueError(f'Invalid epsilon value: {eps}') if not 0.0 <= betas[0] < 1.0: raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}') if not 0.0 <= betas[1] < 1.0: raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}') if not 0.0 <= weight_decay: raise ValueError(f'Invalid weight_decay value: {weight_decay}') defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, foreach=foreach, maximize=maximize, capturable=capturable, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor( state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] state_steps = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('NAdamW does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = torch.tensor(0.) 
# Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) state_steps.append(state['step']) nadamw( params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], capturable=group['capturable'], ) return loss def nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], foreach: Optional[bool] = None, capturable: bool = False, *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, ) -> None: r"""Functional API that performs NAdamW algorithm computation. See NAdamW class for details. """ if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError( 'API has changed, `state_steps` argument must contain a list of' + ' singleton tensors') if foreach is None: foreach = True if foreach and not torch.jit.is_scripting(): func = _multi_tensor_nadamw else: func = _single_tensor_nadamw func( params, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, capturable=capturable, ) def _single_tensor_nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool ): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] # Update step. step_t += 1 # Perform stepweight decay. param.mul_(1. - lr * weight_decay) # Decay the first and second moment running average coefficient. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if capturable: step = step_t # 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor # (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing") bias_correction1 = 1 - torch.pow(beta1, step) bias_correction2 = 1 - torch.pow(beta2, step) step_size = lr / bias_correction1 step_size_neg = step_size.neg() bias_correction2_sqrt = bias_correction2.sqrt() # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) param.addcdiv_(exp_avg, denom) else: step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr / bias_correction1 bias_correction2_sqrt = math.sqrt(bias_correction2) # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. 
exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) param.addcdiv_(exp_avg, denom, value=-step_size) def _multi_tensor_nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool, ): if len(params) == 0: return if capturable: assert all( p.is_cuda and step.is_cuda for p, step in zip(params, state_steps) ), "If capturable=True, params and state_steps must be CUDA tensors." if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # update steps torch._foreach_add_(state_steps, 1) # Perform stepweight decay torch._foreach_mul_(params, 1 - lr * weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) if capturable: # TODO: use foreach_pow if/when foreach_pow is added bias_correction1 = [torch.pow(beta1, step) for step in state_steps] bias_correction2 = [torch.pow(beta2, step) for step in state_steps] # foreach_sub doesn't allow a scalar as the first arg torch._foreach_sub_(bias_correction1, 1) torch._foreach_sub_(bias_correction2, 1) torch._foreach_neg_(bias_correction1) torch._foreach_neg_(bias_correction2) # foreach_div doesn't allow a scalar as the first arg step_size = torch._foreach_div(bias_correction1, lr) torch._foreach_reciprocal_(step_size) torch._foreach_neg_(step_size) bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2) # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_( exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size) ) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size) torch._foreach_addcdiv_(params, exp_avgs, denom) else: bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] step_size = [(lr / bc) * -1 for bc in bias_correction1] bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2] # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
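As a rough usage sketch, the `NAdamW` class defined above drops into a standard PyTorch training step. The import path is assumed from this file's location (`timm/optim/nadamw.py`); adjust it if the optimizer is exposed elsewhere:

```python
import torch
import torch.nn as nn

# Import path assumed from the file location; the class can also be
# constructed directly if the module is vendored into another project.
from timm.optim.nadamw import NAdamW

model = nn.Linear(16, 4)
optimizer = NAdamW(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)

x = torch.randn(8, 16)
y = torch.randint(0, 4, (8,))

optimizer.zero_grad()
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()
optimizer.step()  # uses the multi-tensor (foreach) path by default
print(loss.item())
```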
pytorch-image-models/timm/optim/nadamw.py/0
{ "file_path": "pytorch-image-models/timm/optim/nadamw.py", "repo_id": "pytorch-image-models", "token_count": 5958 }
184
from .agc import adaptive_clip_grad from .checkpoint_saver import CheckpointSaver from .clip_grad import dispatch_clip_grad from .cuda import ApexScaler, NativeScaler from .decay_batch import decay_batch_step, check_batch_size_retry from .distributed import distribute_bn, reduce_tensor, init_distributed_device,\ world_info_from_env, is_distributed_env, is_primary from .jit import set_jit_legacy, set_jit_fuser from .log import setup_default_logging, FormatterNoInfo from .metrics import AverageMeter, accuracy from .misc import natural_key, add_bool_arg, ParseKwargs from .model import unwrap_model, get_state_dict, freeze, unfreeze, reparameterize_model from .model_ema import ModelEma, ModelEmaV2 from .random import random_seed from .summary import update_summary, get_outdir
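A short sketch of a few of the helpers re-exported above, assuming their usual signatures (`random_seed` seeds the global RNGs, `accuracy` returns one percentage per requested top-k, `AverageMeter` tracks a running average); treat the exact argument names as assumptions:

```python
import torch
from timm.utils import AverageMeter, accuracy, random_seed

random_seed(42)  # seed Python / numpy / torch RNGs

top1 = AverageMeter()
logits = torch.randn(8, 10)
target = torch.randint(0, 10, (8,))

acc1, acc5 = accuracy(logits, target, topk=(1, 5))  # percentages for top-1 / top-5
top1.update(acc1.item(), n=logits.size(0))
print(f'top-1 running avg: {top1.avg:.2f}%')
```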
pytorch-image-models/timm/utils/__init__.py/0
{ "file_path": "pytorch-image-models/timm/utils/__init__.py", "repo_id": "pytorch-image-models", "token_count": 246 }
185
__version__ = '0.9.14dev0'
pytorch-image-models/timm/version.py/0
{ "file_path": "pytorch-image-models/timm/version.py", "repo_id": "pytorch-image-models", "token_count": 14 }
186
Hugging Face Optimized Inference License 1.0 (HFOILv1.0) This License Agreement governs the use of the Software and its Modifications. It is a binding agreement between the Licensor and You. This License Agreement shall be referred to as Hugging Face Optimized Inference License 1.0 or HFOILv1.0. We may publish revised versions of this License Agreement from time to time. Each version will be given a distinguished number. By downloading, accessing, modifying, distributing or otherwise using the Software, You consent to all of the terms and conditions below. So, if You do not agree with those, please do not download, access, modify, distribute, or use the Software. 1. PERMISSIONS You may use, modify and distribute the Software pursuant to the following terms and conditions: Copyright License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare, publicly display, publicly perform, sublicense under the terms herein, and distribute the Software and Modifications of the Software. Patent License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free patent license to make, have made, Use, offer to sell, sell, import, and otherwise transfer the Software, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Software to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Software or a Contribution incorporated within the Software constitutes direct or contributory patent infringement, then any rights granted to You under this License Agreement for the Software shall terminate as of the date such litigation is filed. No other rights. All rights not expressly granted herein are retained. 2. RESTRICTIONS You may not distribute the Software as a hosted or managed, and paid service, where the service grants users access to any substantial set of the features or functionality of the Software. If you wish to do so, You will need to be granted additional rights from the Licensor which will be subject to a separate mutually agreed agreement. You may not sublicense the Software under any other terms than those listed in this License. 3. OBLIGATIONS When You modify the Software, You agree to: - attach a notice stating the Modifications of the Software You made; and - attach a notice stating that the Modifications of the Software are released under this License Agreement. When You distribute the Software or Modifications of the Software, You agree to: - give any recipients of the Software a copy of this License Agreement; - retain all Explanatory Documentation; and if sharing the Modifications of the Software, add Explanatory Documentation documenting the changes made to create the Modifications of the Software; - retain all copyright, patent, trademark and attribution notices. 4. MISCELLANEOUS Termination. Licensor reserves the right to restrict Use of the Software in violation of this License Agreement, upon which Your licenses will automatically terminate. Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Software by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. Trademarks and related. Nothing in this License Agreement permits You (i) to make Use of Licensors’ trademarks, trade names, or logos, (ii) otherwise suggest endorsement by Licensor, or (iii) misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. Output You generate. Licensor claims no rights in the Output. You agree not to contravene any provision as stated in the License Agreement with your Use of the Output. Disclaimer of Warranty. Except as expressly provided otherwise herein, and to the fullest extent permitted by law, Licensor provides the Software (and each Contributor provides its Contributions) AS IS, and Licensor disclaims all warranties or guarantees of any kind, express or implied, whether arising under any law or from any usage in trade, or otherwise including but not limited to the implied warranties of merchantability, non-infringement, quiet enjoyment, fitness for a particular purpose, or otherwise. You are solely responsible for determining the appropriateness of the Software and Modifications of the Software for your purposes (including your use or distribution of the Software and Modifications of the Software), and assume any risks associated with Your exercise of permissions under this License Agreement. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License Agreement or out of the Use or inability to Use the Software (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, model failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. Accepting Warranty or Additional Liability. While sharing the Software or Modifications of the Software thereof, You may choose to offer and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License Agreement. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of Licensor or any other Contributor, and you hereby agree to indemnify, defend, and hold Licensor and each other Contributor (and their successors or assigns) harmless for any liability incurred by, or claims asserted against, such Licensor or Contributor (and their successors or assigns) by reason of your accepting any such warranty or additional liability. Severability. This License Agreement is a license of copyright and patent rights and an agreement in contract between You and the Licensor. 
If any provision of this License Agreement is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 5. DEFINITIONS “Contribution” refers to any work of authorship, including the original version of the Software and any Modifications of the Software that is intentionally submitted to Licensor for inclusion in the Software by the copyright owner or by an individual or entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Software, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as “Not a Contribution.” “Contributor” refers to Licensor and any individual or entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Software. “Data” refers to a collection of information extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License Agreement. “Explanatory Documentation” refers to any documentation or related information including but not limited to model cards or data cards dedicated to inform the public about the characteristics of the Software. Explanatory documentation is not licensed under this License. "License Agreement" refers to these terms and conditions. “Licensor” refers to the rights owners or entity authorized by the rights owners that are granting the terms and conditions of this License Agreement. “Model” refers to machine-learning based assemblies (including checkpoints), consisting of learnt weights and parameters (including optimizer states), corresponding to a model architecture as embodied in Software source code. Source code is not licensed under this License Agreement. “Modifications of the Software” refers to all changes to the Software, including without limitation derivative works of the Software. “Output” refers to the results of operating the Software. “Share” refers to any transmission, reproduction, publication or other sharing of the Software or Modifications of the Software to a third party, including providing the Softwaire as a hosted service made available by electronic or other remote means, including - but not limited to - API-based or web access. “Software” refers to the software and Model (or parts of either) that Licensor makes available under this License Agreement. “Third Parties” refers to individuals or legal entities that are not under common control with Licensor or You. “Use” refers to anything You or your representatives do with the Software, including but not limited to generating any Output, fine tuning, updating, running, training, evaluating and/or reparametrizing the Model. "You" (or "Your") refers to an individual or Legal Entity exercising permissions granted by this License Agreement and/or making Use of the Software for whichever purpose and in any field of Use.
text-generation-inference/LICENSE/0
{ "file_path": "text-generation-inference/LICENSE", "repo_id": "text-generation-inference", "token_count": 2207 }
187
# Text Generation The Hugging Face Text Generation Python library provides a convenient way of interfacing with a `text-generation-inference` instance running on [Hugging Face Inference Endpoints](https://huggingface.co/inference-endpoints) or on the Hugging Face Hub. ## Get Started ### Install ```shell pip install text-generation ``` ### Inference API Usage ```python from text_generation import InferenceAPIClient client = InferenceAPIClient("bigscience/bloomz") text = client.generate("Why is the sky blue?").generated_text print(text) # ' Rayleigh scattering' # Token Streaming text = "" for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` or with the asynchronous client: ```python from text_generation import InferenceAPIAsyncClient client = InferenceAPIAsyncClient("bigscience/bloomz") response = await client.generate("Why is the sky blue?") print(response.generated_text) # ' Rayleigh scattering' # Token Streaming text = "" async for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` Check all currently deployed models on the Huggingface Inference API with `Text Generation` support: ```python from text_generation.inference_api import deployed_models print(deployed_models()) ``` ### Hugging Face Inference Endpoint usage ```python from text_generation import Client endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud" client = Client(endpoint_url) text = client.generate("Why is the sky blue?").generated_text print(text) # ' Rayleigh scattering' # Token Streaming text = "" for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` or with the asynchronous client: ```python from text_generation import AsyncClient endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud" client = AsyncClient(endpoint_url) response = await client.generate("Why is the sky blue?") print(response.generated_text) # ' Rayleigh scattering' # Token Streaming text = "" async for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` ### Types ```python # Request Parameters class Parameters: # Activate logits sampling do_sample: bool # Maximum number of generated tokens max_new_tokens: int # The parameter for repetition penalty. 1.0 means no penalty. # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. repetition_penalty: Optional[float] # Whether to prepend the prompt to the generated text return_full_text: bool # Stop generating tokens if a member of `stop_sequences` is generated stop: List[str] # Random sampling seed seed: Optional[int] # The value used to module the logits distribution. temperature: Optional[float] # The number of highest probability vocabulary tokens to keep for top-k-filtering. top_k: Optional[int] # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or # higher are kept for generation. 
top_p: Optional[float] # truncate inputs tokens to the given size truncate: Optional[int] # Typical Decoding mass # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information typical_p: Optional[float] # Generate best_of sequences and return the one if the highest token logprobs best_of: Optional[int] # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) watermark: bool # Get decoder input token logprobs and ids decoder_input_details: bool # Return the N most likely tokens at each step top_n_tokens: Optional[int] # Decoder input tokens class InputToken: # Token ID from the model tokenizer id: int # Token text text: str # Logprob # Optional since the logprob of the first token cannot be computed logprob: Optional[float] # Generated tokens class Token: # Token ID from the model tokenizer id: int # Token text text: str # Logprob logprob: float # Is the token a special token # Can be used to ignore tokens when concatenating special: bool # Generation finish reason class FinishReason(Enum): # number of generated tokens == `max_new_tokens` Length = "length" # the model generated its end of sequence token EndOfSequenceToken = "eos_token" # the model generated a text included in `stop_sequences` StopSequence = "stop_sequence" # Additional sequences when using the `best_of` parameter class BestOfSequence: # Generated text generated_text: str # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] # Decoder input tokens, empty if decoder_input_details is False prefill: List[InputToken] # Generated tokens tokens: List[Token] # Most likely tokens top_tokens: Optional[List[List[Token]]] # `generate` details class Details: # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] # Decoder input tokens, empty if decoder_input_details is False prefill: List[InputToken] # Generated tokens tokens: List[Token] # Most likely tokens top_tokens: Optional[List[List[Token]]] # Additional sequences when using the `best_of` parameter best_of_sequences: Optional[List[BestOfSequence]] # `generate` return value class Response: # Generated text generated_text: str # Generation details details: Details # `generate_stream` details class StreamDetails: # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] # `generate_stream` return value class StreamResponse: # Generated token token: Token # Most likely tokens top_tokens: Optional[List[Token]] # Complete generated text # Only available when the generation is finished generated_text: Optional[str] # Generation details # Only available when the generation is finished details: Optional[StreamDetails] # Inference API currently deployed model class DeployedModel: model_id: str sha: str ```
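As a small illustration of how these types fit together, here is a minimal sketch (using the same placeholder endpoint URL as in the examples above) that requests generation details and inspects the returned `Response` and `Details` objects:

```python
from text_generation import Client

# Placeholder endpoint, as in the examples above
client = Client("https://YOUR_ENDPOINT.endpoints.huggingface.cloud")

response = client.generate(
    "Why is the sky blue?",
    max_new_tokens=20,
    decoder_input_details=True,
)

# `response` is a `Response`; `response.details` is a `Details` instance
print(response.generated_text)
print(response.details.finish_reason, response.details.generated_tokens)

# `details.prefill` holds `InputToken`s, `details.tokens` holds generated `Token`s
for token in response.details.tokens:
    print(token.id, token.text, token.logprob, token.special)
```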
text-generation-inference/clients/python/README.md/0
{ "file_path": "text-generation-inference/clients/python/README.md", "repo_id": "text-generation-inference", "token_count": 2195 }
188
# Consuming Text Generation Inference

There are many ways you can consume Text Generation Inference server in your applications. After launching, you can use the `/generate` route and make a `POST` request to get results from the server. You can also use the `/generate_stream` route if you want TGI to return a stream of tokens. You can make the requests using the tool of your preference, such as curl, Python, or TypeScript. For a final end-to-end experience, we also open-sourced ChatUI, a chat interface for open-source models.

## curl

After the launch, you can query the model using either the `/generate` or `/generate_stream` routes:

```bash
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```

## Inference Client

[`huggingface-hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library to interact with the Hugging Face Hub, including its endpoints. It provides a nice high-level class, [`~huggingface_hub.InferenceClient`], which makes it easy to make calls to a TGI endpoint. `InferenceClient` also takes care of parameter validation and provides an easy-to-use interface. You can install the `huggingface-hub` package with pip.

```bash
pip install huggingface-hub
```

Once you start the TGI server, instantiate `InferenceClient()` with the URL to the endpoint serving the model. You can then call `text_generation()` to hit the endpoint through Python.

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="http://127.0.0.1:8080")
client.text_generation(prompt="Write a code for snake game")
```

You can do streaming with `InferenceClient` by passing `stream=True`. Streaming will return tokens as they are being generated in the server. To use streaming, you can do as follows:

```python
for token in client.text_generation("How do you make cheese?", max_new_tokens=12, stream=True):
    print(token)
```

Another parameter you can use with the TGI backend is `details`. You can get more details on generation (tokens, probabilities, etc.) by setting `details` to `True`. When it's specified, TGI will return a `TextGenerationResponse` or `TextGenerationStreamResponse` rather than a string or stream.

```python
output = client.text_generation(prompt="Meaning of life is", details=True)
print(output)
# TextGenerationResponse(generated_text=' a complex concept that is not always clear to the individual. It is a concept that is not always', details=Details(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=20, seed=None, prefill=[], tokens=[Token(id=267, text=' a', logprob=-2.0723474, special=False), Token(id=11235, text=' complex', logprob=-3.1272552, special=False), Token(id=17908, text=' concept', logprob=-1.3632495, special=False),..))
```

You can see how to stream below.

```python
output = client.text_generation(prompt="Meaning of life is", stream=True, details=True)
print(next(iter(output)))
# TextGenerationStreamResponse(token=Token(id=267, text=' a', logprob=-2.0723474, special=False), generated_text=None, details=None)
```

You can check out the details of the function [here](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation). There is also an async version of the client, `AsyncInferenceClient`, based on `asyncio` and `aiohttp`.
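As a minimal sketch (assuming, as in the examples above, a TGI server reachable at `http://127.0.0.1:8080`), the async client can be used like this:

```python
import asyncio

from huggingface_hub import AsyncInferenceClient

client = AsyncInferenceClient(model="http://127.0.0.1:8080")


async def main():
    # Non-streaming call: returns the generated text as a plain string
    text = await client.text_generation(prompt="How do you make cheese?", max_new_tokens=12)
    print(text)

    # Streaming call: tokens are yielded as they are generated by the server
    async for token in await client.text_generation(
        "How do you make cheese?", max_new_tokens=12, stream=True
    ):
        print(token)


asyncio.run(main())
```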
You can find docs for it [here](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.AsyncInferenceClient) ## ChatUI ChatUI is an open-source interface built for LLM serving. It offers many customization options, such as web search with SERP API and more. ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces. To serve both ChatUI and TGI in same environment, simply add your own endpoints to the `MODELS` variable in `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served. ``` { // rest of the model config here "endpoints": [{"url": "https://HOST:PORT/generate_stream"}] } ``` ![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chatui_screen.png) ## Gradio Gradio is a Python library that helps you build web applications for your machine learning models with a few lines of code. It has a `ChatInterface` wrapper that helps create neat UIs for chatbots. Let's take a look at how to create a chatbot with streaming mode using TGI and Gradio. Let's install Gradio and Hub Python library first. ```bash pip install huggingface-hub gradio ``` Assume you are serving your model on port 8080, we will query through [InferenceClient](consuming_tgi#inference-client). ```python import gradio as gr from huggingface_hub import InferenceClient client = InferenceClient(model="http://127.0.0.1:8080") def inference(message, history): partial_message = "" for token in client.text_generation(message, max_new_tokens=20, stream=True): partial_message += token yield partial_message gr.ChatInterface( inference, chatbot=gr.Chatbot(height=300), textbox=gr.Textbox(placeholder="Chat with me!", container=False, scale=7), description="This is the demo for Gradio UI consuming TGI endpoint with LLaMA 7B-Chat model.", title="Gradio 🤝 TGI", examples=["Are tomatoes vegetables?"], retry_btn="Retry", undo_btn="Undo", clear_btn="Clear", ).queue().launch() ``` The UI looks like this 👇 <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/gradio-tgi.png" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/gradio-tgi-dark.png" /> </div> You can try the demo directly here 👇 <div class="block dark:hidden"> <iframe src="https://merve-gradio-tgi-2.hf.space?__theme=light" width="850" height="750" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://merve-gradio-tgi-2.hf.space?__theme=dark" width="850" height="750" ></iframe> </div> You can disable streaming mode using `return` instead of `yield` in your inference function, like below. ```python def inference(message, history): return client.text_generation(message, max_new_tokens=20) ``` You can read more about how to customize a `ChatInterface` [here](https://www.gradio.app/guides/creating-a-chatbot-fast). ## API documentation You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route. The Swagger UI is also available [here](https://huggingface.github.io/text-generation-inference).
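If you prefer not to use a client library, the same `/generate` route shown in the curl example can also be called directly from Python. Below is a minimal sketch, assuming the same local TGI instance as above:

```python
import requests

# Same local TGI instance as in the curl example above
url = "http://127.0.0.1:8080/generate"
payload = {
    "inputs": "What is Deep Learning?",
    "parameters": {"max_new_tokens": 20},
}

response = requests.post(url, json=payload)
response.raise_for_status()

# The route returns a JSON object containing the generated text
print(response.json()["generated_text"])
```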
text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md", "repo_id": "text-generation-inference", "token_count": 2274 }
189
# Supported Models and Hardware

Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models and hardware are supported.

## Supported Models

The following models are optimized and can be served with TGI, which uses custom CUDA kernels for better inference. You can add the flag `--disable-custom-kernels` at the end of the `docker run` command if you wish to disable them.

- [BLOOM](https://huggingface.co/bigscience/bloom)
- [FLAN-T5](https://huggingface.co/google/flan-t5-xxl)
- [Galactica](https://huggingface.co/facebook/galactica-120b)
- [GPT-Neox](https://huggingface.co/EleutherAI/gpt-neox-20b)
- [Llama](https://github.com/facebookresearch/llama)
- [OPT](https://huggingface.co/facebook/opt-66b)
- [SantaCoder](https://huggingface.co/bigcode/santacoder)
- [Starcoder](https://huggingface.co/bigcode/starcoder)
- [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b)
- [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b)
- [MPT](https://huggingface.co/mosaicml/mpt-30b)
- [Llama V2](https://huggingface.co/meta-llama)
- [Code Llama](https://huggingface.co/codellama)
- [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)
- [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
- [Phi](https://huggingface.co/microsoft/phi-2)

If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models:

```python
# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")

# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```

If you wish to serve a supported model that already exists in a local folder, just point to the local folder.

```bash
text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>
```

## Supported Hardware

TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 12.2+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed.

TGI also has support for ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention, GPTQ quantization, and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and the support may be extended in the future:

* Loading [AWQ](https://huggingface.co/docs/transformers/quantization#awq) checkpoints.
* Flash [layer norm kernel](https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm)
* Kernel for sliding window attention (Mistral)

TGI is also supported on the following AI hardware accelerators:
* *Habana first-gen Gaudi and Gaudi2:* check out this [repository](https://github.com/huggingface/tgi-gaudi) to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index)
* *AWS Inferentia2:* check out this [guide](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference) on how to serve models with TGI on Inferentia2.
text-generation-inference/docs/source/supported_models.md/0
{ "file_path": "text-generation-inference/docs/source/supported_models.md", "repo_id": "text-generation-inference", "token_count": 1170 }
190
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 5, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": 0, "tokens": [ { "id": 5229, "logprob": -2.5839844, "special": false, "text": " failed" }, { "id": 29901, "logprob": -0.44970703, "special": false, "text": ":" }, { "id": 4829, "logprob": -1.8339844, "special": false, "text": " Error" }, { "id": 297, "logprob": -1.0556641, "special": false, "text": " in" }, { "id": 1243, "logprob": 0.0, "special": false, "text": " test" } ], "top_tokens": null }, "generated_text": "Test request failed: Error in test" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json", "repo_id": "text-generation-inference", "token_count": 669 }
191
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 6, "prefill": [ { "id": 14402, "logprob": null, "text": "Test" }, { "id": 2581, "logprob": -11.6171875, "text": " request" } ], "seed": 0, "tokens": [ { "id": 284, "logprob": -0.19421387, "special": false, "text": " to" }, { "id": 3758, "logprob": -0.62597656, "special": false, "text": " send" }, { "id": 1366, "logprob": -0.87060547, "special": false, "text": " data" }, { "id": 625, "logprob": -0.88427734, "special": false, "text": " over" }, { "id": 257, "logprob": -1.0830078, "special": false, "text": " a" }, { "id": 3127, "logprob": -1.9462891, "special": false, "text": " network" } ], "top_tokens": null }, "generated_text": "Test request to send data over a network" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json", "repo_id": "text-generation-inference", "token_count": 690 }
192
[ { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3798828, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36328125, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0947266, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8286133, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6826172, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.7290039, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" }, { "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 6, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 259, "logprob": -1.3789062, "special": false, "text": " " }, { "id": 39261, "logprob": -0.36279297, "special": false, "text": "Because" }, { "id": 609, "logprob": -1.0966797, "special": false, "text": " it" }, { "id": 339, "logprob": -0.8276367, "special": false, "text": " is" }, { "id": 16017, "logprob": -1.6845703, "special": false, "text": " blue" }, { "id": 1, "logprob": -0.72753906, "special": true, "text": "</s>" } ] }, "generated_text": "Because it is blue" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json", "repo_id": "text-generation-inference", "token_count": 2874 }
193
import pytest @pytest.fixture(scope="module") def flash_neox_handle(launcher): with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox(flash_neox_handle): await flash_neox_handle.health(300) return flash_neox_handle.client @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox(flash_neox, response_snapshot): response = await flash_neox.generate( "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox_load(flash_neox, generate_load, response_snapshot): responses = await generate_load( flash_neox, "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert len(generated_texts) == 4 assert all( [text == generated_texts[0] for text in generated_texts] ), generated_texts assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_neox.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_neox.py", "repo_id": "text-generation-inference", "token_count": 498 }
194
[package] name = "text-generation-launcher" description = "Text Generation Launcher" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [dependencies] clap = { version = "4.4.5", features = ["derive", "env"] } ctrlc = { version = "3.4.1", features = ["termination"] } nix = "0.27.1" serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } [dev-dependencies] float_eq = "1.0.1" reqwest = { version = "0.11.20", features = ["blocking", "json"] } [build-dependencies] vergen = { version = "8.2.5", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
text-generation-inference/launcher/Cargo.toml/0
{ "file_path": "text-generation-inference/launcher/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 277 }
195
eetq_commit := 71adb5e191bb8290069a580abff0355d7b2dd5c9 eetq: # Clone eetq pip install packaging git clone https://github.com/NetEase-FuXi/EETQ.git eetq build-eetq: eetq cd eetq && git fetch && git checkout $(eetq_commit) && git submodule update --init --recursive cd eetq && python setup.py build install-eetq: build-eetq cd eetq && python setup.py install
text-generation-inference/server/Makefile-eetq/0
{ "file_path": "text-generation-inference/server/Makefile-eetq", "repo_id": "text-generation-inference", "token_count": 155 }
196
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _q4_matrix_cuh #define _q4_matrix_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> class Q4Matrix { public: int device; int height; int width; int groups; int groupsize; uint32_t* cuda_qweight = NULL; uint32_t* cuda_qzeros = NULL; half* cuda_scales = NULL; uint32_t* cuda_x_map = NULL; Q4Matrix ( const int _height, const int _width, const int _groups, uint32_t* _qweight, uint32_t* _qzeros, half* _scales, uint32_t* _g_idx, const int _device ); ~Q4Matrix(); void reconstruct(half* out); private: void make_sequential(const uint32_t* cpu_g_idx); }; void g_q4_keep_matrix(Q4Matrix* m); void g_q4_free_matrices(); #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cuh", "repo_id": "text-generation-inference", "token_count": 419 }
197
#ifndef _q_matrix_cuh #define _q_matrix_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #define MAX_SUPERGROUPS 16 class QMatrix { public: int device; bool is_gptq; int height; int width; int groups; int gptq_groupsize; int rows_8; int rows_6; int rows_5; int rows_4; int rows_3; int rows_2; uint32_t* cuda_q_weight = NULL; uint16_t* cuda_q_perm = NULL; uint16_t* cuda_q_invperm = NULL; uint32_t* cuda_q_scale = NULL; half* cuda_q_scale_max = NULL; uint16_t* cuda_q_groups = NULL; uint16_t* cuda_q_group_map = NULL; uint32_t* cuda_gptq_qzeros = NULL; half* cuda_gptq_scales = NULL; half* temp_dq; bool failed; QMatrix ( const int _device, const int _height, const int _width, const int _groups, uint32_t* _q_weight, uint16_t* _q_perm, uint16_t* _q_invperm, uint32_t* _q_scale, half* _q_scale_max, uint16_t* _q_groups, uint16_t* _q_group_map, uint32_t* _gptq_qzeros, half* _gptq_scales, uint32_t* _gptq_g_idx, half* _temp_dq ); ~QMatrix(); void reconstruct(half* out); bool make_sequential(const uint32_t* cpu_g_idx); private: }; #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh", "repo_id": "text-generation-inference", "token_count": 702 }
198
import pytest from text_generation_server.pb import generate_pb2 @pytest.fixture def default_pb_parameters(): return generate_pb2.NextTokenChooserParameters( temperature=1.0, repetition_penalty=1.0, top_k=0, top_p=1.0, typical_p=1.0, do_sample=False, ) @pytest.fixture def default_pb_stop_parameters(): return generate_pb2.StoppingCriteriaParameters(stop_sequences=[], max_new_tokens=10)
text-generation-inference/server/tests/conftest.py/0
{ "file_path": "text-generation-inference/server/tests/conftest.py", "repo_id": "text-generation-inference", "token_count": 199 }
199
import torch import torch.distributed from typing import Optional, Type from transformers import ( AutoTokenizer, AutoConfig, PreTrainedTokenizerBase, ) from text_generation_server.models.custom_modeling.bloom_modeling import ( BloomForCausalLM, ) from text_generation_server.models import CausalLM from text_generation_server.models.causal_lm import CausalLMBatch from text_generation_server.pb import generate_pb2 from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) class BloomCausalLMBatch(CausalLMBatch): @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "CausalLMBatch": batch = super().from_pb(pb=pb, tokenizer=tokenizer, dtype=dtype, device=device) batch.keys_head_dim_last = False return batch class BLOOMSharded(CausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = torch.float16 if dtype is None else dtype else: device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = AutoConfig.from_pretrained( model_id, revision=revision, slow_but_exact=False, tp_parallel=True, trust_remote_code=trust_remote_code, ) config.pad_token_id = 3 config.quantize = quantize torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights( filenames, device=device, dtype=dtype, process_group=self.process_group, prefix="transformer", ) if config.quantize == "gptq": weights._set_gptq_params(model_id, revision) model = BloomForCausalLM(config, weights) torch.distributed.barrier(group=self.process_group) super(CausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size, ) @property def batch_type(self) -> Type[CausalLMBatch]: return BloomCausalLMBatch def forward( self, input_ids, attention_mask, position_ids, past_key_values: Optional = None ): outputs = self.model.forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=True, ) logits = outputs.logits return logits, outputs.past_key_values
text-generation-inference/server/text_generation_server/models/bloom.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/bloom.py", "repo_id": "text-generation-inference", "token_count": 1581 }
200
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for IDEFICS. """ from typing import Callable, List, Optional, Union from urllib.parse import urlparse from transformers.feature_extraction_utils import BatchFeature from transformers.processing_utils import ProcessorMixin from transformers.tokenization_utils_base import ( BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy, ) from transformers.utils import TensorType, is_torch_available from text_generation_server.models.custom_modeling.idefics_image_processing import ( IdeficsImageProcessor, ) if is_torch_available(): import torch IMAGE_TOKEN = "<image>" # copied from m4.training.packing def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1): # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]] # If any of images index are more than num_classes, set them to -1. # Words after the max number of images allowed have been seen don't attend on anything if num_classes != -1: incremental_mask[incremental_mask >= num_classes] = -1 negatives = incremental_mask == -1 incremental_mask[negatives] = 0 attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes) attn_mask[negatives, :] = 0 return attn_mask # copied from m4.training.packing def image_attention_mask_for_packed_input_ids(input_ids, tokenizer): image_attention_mask = torch.full_like(input_ids, fill_value=-1) next_image_attention_mask = torch.full_like(input_ids, fill_value=-1) image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) eod_token_id = tokenizer.eos_token_id for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx, token_id in enumerate(input_ids[batch_idx]): if token_id == image_token_id: count += 1 image_attention_mask[batch_idx][idx] = count seen_eod = False else: image_attention_mask[batch_idx][idx] = count if seen_eod: image_attention_mask[batch_idx][idx] = -1 if token_id == eod_token_id: seen_eod = True for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1): token_id = input_ids[batch_idx][idx] if token_id == image_token_id: count += 1 next_image_attention_mask[batch_idx][idx] = count seen_eod = False else: next_image_attention_mask[batch_idx][idx] = count if token_id == eod_token_id: seen_eod = True if seen_eod: next_image_attention_mask[batch_idx][idx] = -1 non_negative_indices = next_image_attention_mask[batch_idx] != -1 next_image_attention_mask[batch_idx][non_negative_indices] -= count next_image_attention_mask[batch_idx][non_negative_indices] *= -1 return image_attention_mask, next_image_attention_mask def is_url(string): """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately invalidated the url""" if " " in string: return False result = urlparse(string) return all([result.scheme, result.netloc]) def is_image(string): """Checks if the passed string contains a valid url and nothing else. 
e.g. if space is included it's immediately invalidated the url""" return is_url(string) or string.startswith("data:") class IdeficsProcessor(ProcessorMixin): r""" Constructs a IDEFICS processor which wraps a LLama tokenizer and IDEFICS image processor into a single processor. [`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information. Args: image_processor (`IdeficsImageProcessor`): An instance of [`IdeficsImageProcessor`]. The image processor is a required input. tokenizer (`LlamaTokenizerFast`): An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input. image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image) """ attributes = ["image_processor", "tokenizer"] image_processor_class = "IdeficsImageProcessor" tokenizer_class = "LlamaTokenizerFast" def __init__( self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs, ): if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.default_image_dims = ( self.image_processor.image_num_channels, self.image_processor.image_size, self.image_processor.image_size, ) self.tokenizer_was_trained_with_end_of_utterance_token = ( True if "<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", []) else False ) def __call__( self, prompts: Union[List[TextInput], List[List[TextInput]]], padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, transform: Callable = None, add_eos_token=False, add_end_of_utterance_token=None, debug=False, return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH, ) -> BatchEncoding: """This method takes batched or non-batched prompts made of text and images and converts them into prompts that the model was trained on and prepares the image pixel values for the model to process. Args: prompts (`Union[List[TextInput], [List[List[TextInput]]]]`): either a single prompt or a batched list of prompts - see the detailed description immediately after the end of the arguments doc section. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*): Activates truncation to cut input sequences longer than `max_length` to `max_length`. transform (`Callable`, *optional*): A custom transform function that accepts a single image can be passed for training. For example, `torchvision.Compose` can be used to compose multiple functions. 
If `None` a preset inference-specific set of transforms will be applied to the images add_eos_token (`bool`, *optional*, defaults to `False`): Adds `eos_token` at the end of the final prompt if True` add_end_of_utterance_token (`bool`, *optional*) Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an image). If `None` the tokenizer will be checked instead and if this token is found in `additional_special_tokens` then the value will be `True`. debug (`bool`, *optional*, defaults to `False`): `True` value will help debug prompt generation by dumping useful information return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`): The type of tensors to return. Can be one of: - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. Returns: a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be directly passed to `model.generate` Detailed explanation: Each entry in `prompts` is either a text to be passed as is or an image that will be processed. An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved. When the processor encounters an image it'll inject `<fake_token_around_image><image><fake_token_around_image>` entry into the prompt. Example: ```python checkpoint = "HuggingFaceM4/idefics-9b" processor = AutoProcessor.from_pretrained(checkpoint) url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg" img = processor.image_processor.fetch_images([url])[0] prompts = [ "User:", img, "Describe this image.\nAssistant: An image of two kittens in grass.\n", "User:", "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg", "Describe this image.\nAssistant:", ] inputs = processor(prompts, return_tensors="pt") generated_ids = model.generate(**inputs, max_length=100) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` In this example the `prompts` will be converted into: ``` <s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image. Assistant: An image of two kittens in grass. User:<fake_token_around_image><image><fake_token_around_image>Describe this image. Assistant:' ``` and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the `pixel_values` dict entry of the return value. This example also examplifies that images can be passed as objects or as text urls. It can be seen that the first image is passed as object and the second one as a url. To do training do: ```python image_transform = transforms.Compose( [ transforms.RandomResizedCrop( (w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize(mean=self.image_mean, std=self.image_std), ] ) inputs = processor(prompts, transform=image_transform, return_tensors="pt") ``` In order to help debug prompt generation enable `debug=True` which will show you what's happening. 
""" # if the value isn't overriden by the user, check if the tokenizer was trained with this token and then use it if add_end_of_utterance_token is None: add_end_of_utterance_token = ( self.tokenizer_was_trained_with_end_of_utterance_token ) # turn non-batched prompts into batched if not any(isinstance(i, list) for i in prompts): prompts = [prompts] fake_token = "<fake_token_around_image>" image_token = "<image>" end_of_utterance_token = "<end_of_utterance>" def image_tokens(last_was_image): if last_was_image: return image_token + fake_token else: return fake_token + image_token + fake_token all_texts = [] all_images = [] for sample in prompts: # the model was trained on samples starting with <s> full_text = f"{self.tokenizer.bos_token}" # an image can either be an image object in the item or the url, everything else is a verbatim prompt text image_objects = [] last_was_image = False last_was_text = False for i, item in enumerate(sample): if i > 0: last_was_text = True if not last_was_image else False if isinstance(item, str): item = item.strip(" ") if is_image(item): image = self.image_processor.fetch_images(item) full_text += image_tokens(last_was_image) image_objects.append(image) last_was_image = True else: # we add end_of_utterance_token between each subsequent text prompts (but not at the last one!) if add_end_of_utterance_token and last_was_text: full_text += end_of_utterance_token full_text += item last_was_image = False else: # must be an image obj full_text += image_tokens(last_was_image) image_objects.append(item) last_was_image = True if add_eos_token: full_text += self.tokenizer.eos_token if debug is True: print(f"{full_text=}") image_objects = self.image_processor(image_objects, transform=transform) text_encoding = self.tokenizer( text=full_text, add_special_tokens=False, padding=padding, truncation=truncation, max_length=max_length, ) all_texts.append(text_encoding["input_ids"]) all_images.append(image_objects) max_seq_len = max(len(x) for x in all_texts) # max_num_images has to be at least 1 even when there are no images max_num_images = max(len(x) for x in all_images) max_num_images = max(1, max_num_images) at_least_one_image = sum(len(x) for x in all_images) > 0 output_input_ids = [] output_images = [] output_attention_masks = [] for text, images in zip(all_texts, all_images): padded_input_ids = [self.tokenizer.pad_token_id] * max_seq_len unpadded_seq_len = len(text) start = max_seq_len - unpadded_seq_len padded_input_ids[start:] = text[:max_seq_len] attention_mask = torch.zeros((max_seq_len,), dtype=torch.long) attention_mask[start:] = 1 image_count = padded_input_ids.count(self.image_token_id) local_max_num_images = min(image_count, max_num_images) current_images = images[:local_max_num_images] if len(current_images) > 0: padded_image_tensor = torch.zeros( max_num_images, *current_images.size()[1:] ) padded_image_tensor[: current_images.size(0)] = current_images else: padded_image_tensor = torch.zeros( max_num_images, *self.default_image_dims ) output_images.append(padded_image_tensor) output_input_ids.append(torch.tensor(padded_input_ids)) output_attention_masks.append(attention_mask) output_input_ids = torch.stack(output_input_ids) output_images = torch.stack(output_images) output_attention_masks = torch.stack(output_attention_masks) if at_least_one_image: image_attention_mask, _ = image_attention_mask_for_packed_input_ids( output_input_ids, self.tokenizer ) image_attention_mask = incremental_to_binary_attention_mask( image_attention_mask, num_classes=max_num_images ) 
else: # in full language mode we set the image mask to all-0s image_attention_mask = torch.zeros( output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool, ) return BatchFeature( data={ "input_ids": output_input_ids, "attention_mask": output_attention_masks, "pixel_values": output_images, "image_attention_mask": image_attention_mask, } ) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_processing.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_processing.py", "repo_id": "text-generation-inference", "token_count": 8157 }
201
import torch import torch.distributed from typing import Optional from transformers import ( AutoTokenizer, AutoConfig, ) from text_generation_server.models import CausalLM from text_generation_server.models.custom_modeling.neox_modeling import ( GPTNeoxForCausalLM, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) class GPTNeoxSharded(CausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = torch.float16 if dtype is None else dtype else: device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) tokenizer.pad_token = tokenizer.eos_token config = AutoConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code, ) config.quantize = quantize torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights( filenames, device=device, dtype=dtype, process_group=self.process_group ) if config.quantize == "gptq": weights._set_gptq_params(model_id, revision) model = GPTNeoxForCausalLM(config, weights) torch.distributed.barrier(group=self.process_group) super(CausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size, ) def forward( self, input_ids, attention_mask, position_ids, past_key_values: Optional = None ): outputs = self.model.forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=True, ) logits = outputs.logits return logits, outputs.past_key_values
text-generation-inference/server/text_generation_server/models/gpt_neox.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/gpt_neox.py", "repo_id": "text-generation-inference", "token_count": 1220 }
202
# Copied logic from https://github.com/mit-han-lab/llm-awq/blob/f084f40bd996f3cf3a0633c1ad7d9d476c318aaa/awq/quantize/qmodule.py import math import torch import torch.nn as nn import awq_inference_engine # with CUDA kernels # class ScaledActivation(nn.Module): # def __init__(self, module, scales): # super().__init__() # self.act = module # self.scales = nn.Parameter(scales.data) # # def forward(self, x): # return self.act(x) / self.scales.view(1, 1, -1).to(x.device) class WQLinear(nn.Module): def __init__(self, w_bit, group_size, qweight, qzeros, scales, bias): super().__init__() if w_bit not in [4]: raise NotImplementedError("Only 4-bit are supported for now.") self.in_features = qweight.shape[0] self.out_features = qweight.shape[1] * 32 // w_bit self.w_bit = w_bit self.group_size = group_size if group_size != -1 else self.in_features # quick sanity check (make sure aligment) assert self.in_features % self.group_size == 0 assert self.out_features % (32 // self.w_bit) == 0 self.qweight = qweight self.qzeros = qzeros self.scales = scales if bias: self.bias = bias else: self.bias = None @torch.no_grad() def forward(self, x): out_shape = x.shape[:-1] + (self.out_features,) out = awq_inference_engine.gemm_forward_cuda( x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, 8 ) out = out + self.bias if self.bias is not None else out return out.reshape(out_shape)
text-generation-inference/server/text_generation_server/utils/awq/quantize/qmodule.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/awq/quantize/qmodule.py", "repo_id": "text-generation-inference", "token_count": 770 }
203
import os import json from loguru import logger import torch from transformers import AutoTokenizer from peft import AutoPeftModelForCausalLM, AutoPeftModelForSeq2SeqLM def download_and_unload_peft(model_id, revision, trust_remote_code): torch_dtype = torch.float16 logger.info("Trying to load a Peft model. It might take a while without feedback") try: model = AutoPeftModelForCausalLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) except Exception: model = AutoPeftModelForSeq2SeqLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) logger.info("Peft model detected.") logger.info(f"Merging the lora weights.") base_model_id = model.peft_config["default"].base_model_name_or_path model = model.merge_and_unload() os.makedirs(model_id, exist_ok=True) cache_dir = model_id logger.info(f"Saving the newly created merged model to {cache_dir}") tokenizer = AutoTokenizer.from_pretrained( base_model_id, trust_remote_code=trust_remote_code ) model.save_pretrained(cache_dir, safe_serialization=True) model.config.save_pretrained(cache_dir) tokenizer.save_pretrained(cache_dir)
text-generation-inference/server/text_generation_server/utils/peft.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/peft.py", "repo_id": "text-generation-inference", "token_count": 629 }
204
/* eslint-disable @typescript-eslint/no-explicit-any */ import { bertProcessing, byteLevelProcessing, robertaProcessing, sequenceProcessing, templateProcessing } from '../../' describe('bertProcessing', () => { it('instantiates correctly with only two parameters', () => { const processor = bertProcessing(['sep', 1], ['cls', 2]) expect(processor.constructor.name).toEqual('Processor') }) it('throws if only one argument is provided', () => { expect(() => (bertProcessing as any)(['sep', 1])).toThrow('Given napi value is not an array') }) it('throws if arguments are malformed', () => { expect(() => (bertProcessing as any)(['sep', '1'], ['cls', '2'])).toThrow( 'Failed to convert napi value String into rust type `u32`', ) expect(() => (bertProcessing as any)(['sep'], ['cls'])).toThrow('Array length < 2') }) }) describe('byteLevelProcessing', () => { it('instantiates correctly without any parameter', () => { const processor = byteLevelProcessing() expect(processor.constructor.name).toEqual('Processor') }) it('accepts `undefined` as first parameter', () => { expect(byteLevelProcessing(undefined)).toBeDefined() }) it('accepts `boolean` as first parameter', () => { expect(byteLevelProcessing(true)).toBeDefined() }) }) describe('robertaProcessing', () => { it('instantiates correctly with only two parameters', () => { const processor = robertaProcessing(['sep', 1], ['cls', 2]) expect(processor.constructor.name).toEqual('Processor') }) it('accepts `undefined` as third and fourth parameters', () => { expect(robertaProcessing(['sep', 1], ['cls', 2], undefined, undefined)).toBeDefined() }) it('accepts `boolean` as third and fourth parameter', () => { expect(robertaProcessing(['sep', 1], ['cls', 2], true, true)).toBeDefined() }) }) describe('templateProcessing', () => { it('instantiates correctly with only a single template', () => { const processor = templateProcessing('$A $A') expect(processor.constructor.name).toEqual('Processor') }) it('throws if special tokens are missing', () => { expect(() => templateProcessing('[CLS] $A [SEP]')).toThrow('Missing SpecialToken(s) with id(s)') }) it('instantiates correctly with both templates', () => { const processor = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [ ['[CLS]', 1], ['[SEP]', 2], ]) expect(processor.constructor.name).toEqual('Processor') }) }) describe('sequenceProcessing', () => { it('accepts `PostProcessor[]` as first parameter', () => { const template = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [ ['[CLS]', 1], ['[SEP]', 2], ]) const bytelevel = byteLevelProcessing(true) expect(sequenceProcessing([bytelevel, template])).toBeDefined() }) })
tokenizers/bindings/node/lib/bindings/post-processors.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/post-processors.test.ts", "repo_id": "tokenizers", "token_count": 1022 }
205
# `tokenizers-linux-arm64-gnu` This is the **aarch64-unknown-linux-gnu** binary for `tokenizers`
tokenizers/bindings/node/npm/linux-arm64-gnu/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm64-gnu/README.md", "repo_id": "tokenizers", "token_count": 35 }
206
use serde::de::Deserializer; use serde::ser::Serializer; use serde::{Deserialize, Serialize}; use std::sync::{Arc, RwLock}; pub fn serialize<S, T>(val: &Option<Arc<RwLock<T>>>, s: S) -> Result<S::Ok, S::Error> where S: Serializer, T: Serialize, { T::serialize(&*(val.clone().unwrap()).read().unwrap(), s) } pub fn deserialize<'de, D, T>(d: D) -> Result<Option<Arc<RwLock<T>>>, D::Error> where D: Deserializer<'de>, T: Deserialize<'de>, { Ok(Some(Arc::new(RwLock::new(T::deserialize(d)?)))) }
tokenizers/bindings/node/src/arc_rwlock_serde.rs/0
{ "file_path": "tokenizers/bindings/node/src/arc_rwlock_serde.rs", "repo_id": "tokenizers", "token_count": 220 }
207
# Generated content DO NOT EDIT
class AddedToken:
    """
    Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
    It can have special options that define the way it should behave.

    Args:
        content (:obj:`str`): The content of the token

        single_word (:obj:`bool`, defaults to :obj:`False`):
            Defines whether this token should only match single words. If :obj:`True`, this
            token will never match inside of a word. For example the token ``ing`` would match
            on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
            The notion of "`inside of a word`" is defined by the word boundaries pattern in
            regular expressions (ie. the token should start and end with word boundaries).

        lstrip (:obj:`bool`, defaults to :obj:`False`):
            Defines whether this token should strip all potential whitespaces on its left side.
            If :obj:`True`, this token will greedily match any whitespace on its left. For
            example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
            ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).

        rstrip (:obj:`bool`, defaults to :obj:`False`):
            Defines whether this token should strip all potential whitespaces on its right side.
            If :obj:`True`, this token will greedily match any whitespace on its right. It works
            just like :obj:`lstrip` but on the right.

        normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
            Defines whether this token should match against the normalized version of the input
            text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
            lowercasing the text, the token could be extracted from the input ``"I saw a lion Yesterday"``.
        special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
            Defines whether this token should be skipped when decoding.

    """

    def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
        pass
    @property
    def content(self):
        """
        Get the content of this :obj:`AddedToken`
        """
        pass
    @property
    def lstrip(self):
        """
        Get the value of the :obj:`lstrip` option
        """
        pass
    @property
    def normalized(self):
        """
        Get the value of the :obj:`normalized` option
        """
        pass
    @property
    def rstrip(self):
        """
        Get the value of the :obj:`rstrip` option
        """
        pass
    @property
    def single_word(self):
        """
        Get the value of the :obj:`single_word` option
        """
        pass
    @property
    def special(self):
        """
        Get the value of the :obj:`special` option
        """
        pass

class Encoding:
    """
    The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
    """

    @property
    def attention_mask(self):
        """
        The attention mask

        This indicates to the LM which tokens should be attended to, and which should not.
        This is especially important when batching sequences, where we need to apply padding.

        Returns:
            :obj:`List[int]`: The attention mask
        """
        pass
    def char_to_token(self, char_pos, sequence_index=0):
        """
        Get the token that contains the char at the given position in the input sequence.
Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the token that contains this char in the encoded sequence """ pass def char_to_word(self, char_pos, sequence_index=0): """ Get the word that contains the char at the given position in the input sequence. Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the word that contains this char in the input sequence """ pass @property def ids(self): """ The generated IDs The IDs are the main input to a Language Model. They are the token indices, the numerical representations that a LM understands. Returns: :obj:`List[int]`: The list of IDs """ pass @staticmethod def merge(encodings, growing_offsets=True): """ Merge the list of encodings into one final :class:`~tokenizers.Encoding` Args: encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): The list of encodings that should be merged in one growing_offsets (:obj:`bool`, defaults to :obj:`True`): Whether the offsets should accumulate while merging Returns: :class:`~tokenizers.Encoding`: The resulting Encoding """ pass @property def n_sequences(self): """ The number of sequences represented Returns: :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` """ pass @property def offsets(self): """ The offsets associated to each token These offsets let's you slice the input string, and thus retrieve the original part that led to producing the corresponding token. Returns: A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets """ pass @property def overflowing(self): """ A :obj:`List` of overflowing :class:`~tokenizers.Encoding` When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting the output into as many pieces as required to match the specified maximum length. This field lets you retrieve all the subsequent pieces. When you use pairs of sequences, the overflowing pieces will contain enough variations to cover all the possible combinations, while respecting the provided maximum length. """ pass def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"): """ Pad the :class:`~tokenizers.Encoding` at the given length Args: length (:obj:`int`): The desired length direction: (:obj:`str`, defaults to :obj:`right`): The expected padding direction. Can be either :obj:`right` or :obj:`left` pad_id (:obj:`int`, defaults to :obj:`0`): The ID corresponding to the padding token pad_type_id (:obj:`int`, defaults to :obj:`0`): The type ID corresponding to the padding token pad_token (:obj:`str`, defaults to `[PAD]`): The pad token to use """ pass @property def sequence_ids(self): """ The generated sequence indices. They represent the index of the input sequence associated to each token. The sequence id can be None if the token is not related to any input sequence, like for example with special tokens. Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. """ pass def set_sequence_id(self, sequence_id): """ Set the given sequence index Set the given sequence index for the whole range of tokens contained in this :class:`~tokenizers.Encoding`. """ pass @property def special_tokens_mask(self): """ The special token mask This indicates which tokens are special tokens, and which are not. 
Returns: :obj:`List[int]`: The special tokens mask """ pass def token_to_chars(self, token_index): """ Get the offsets of the token at the given index. The returned offsets are related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` """ pass def token_to_sequence(self, token_index): """ Get the index of the sequence represented by the given token. In the general use case, this method returns :obj:`0` for a single sequence or the first sequence of a pair, and :obj:`1` for the second sequence of a pair Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The sequence id of the given token """ pass def token_to_word(self, token_index): """ Get the index of the word that contains the token in one of the input sequences. The returned word index is related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The index of the word in the relevant input sequence. """ pass @property def tokens(self): """ The generated tokens They are the string representation of the IDs. Returns: :obj:`List[str]`: The list of tokens """ pass def truncate(self, max_length, stride=0, direction="right"): """ Truncate the :class:`~tokenizers.Encoding` at the given length If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating this information is lost. It will be considered as representing a single sequence. Args: max_length (:obj:`int`): The desired length stride (:obj:`int`, defaults to :obj:`0`): The length of previous content to be included in each overflowing piece direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass @property def type_ids(self): """ The generated type IDs Generally used for tasks like sequence classification or question answering, these tokens let the LM know which input sequence corresponds to each tokens. Returns: :obj:`List[int]`: The list of type ids """ pass @property def word_ids(self): """ The generated word indices. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass def word_to_chars(self, word_index, sequence_index=0): """ Get the offsets of the word at the given index in one of the input sequences. Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` """ pass def word_to_tokens(self, word_index, sequence_index=0): """ Get the encoded tokens corresponding to the word at the given index in one of the input sequences. 
Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` """ pass @property def words(self): """ The generated word indices. .. warning:: This is deprecated and will be removed in a future version. Please use :obj:`~tokenizers.Encoding.word_ids` instead. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass class NormalizedString: """ NormalizedString A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. While making all the requested modifications, it keeps track of the alignment information between the two versions of the string. Args: sequence: str: The string sequence used to initialize this NormalizedString """ def append(self, s): """ Append the given sequence to the string """ pass def clear(self): """ Clears the string """ pass def filter(self, func): """ Filter each character of the string using the given func """ pass def for_each(self, func): """ Calls the given function for each character of the string """ pass def lowercase(self): """ Lowercase the string """ pass def lstrip(self): """ Strip the left of the string """ pass def map(self, func): """ Calls the given function for each character of the string Replaces each character of the string using the returned value. Each returned value **must** be a str of length 1 (ie a character). """ pass def nfc(self): """ Runs the NFC normalization """ pass def nfd(self): """ Runs the NFD normalization """ pass def nfkc(self): """ Runs the NFKC normalization """ pass def nfkd(self): """ Runs the NFKD normalization """ pass @property def normalized(self): """ The normalized part of the string """ pass def prepend(self, s): """ Prepend the given sequence to the string """ pass def replace(self, pattern, content): """ Replace the content of the given pattern with the provided content Args: pattern: Pattern: A pattern used to match the string. Usually a string or a Regex content: str: The content to be used as replacement """ pass def rstrip(self): """ Strip the right of the string """ pass def slice(self, range): """ Slice the string using the given range """ pass def split(self, pattern, behavior): """ Split the NormalizedString using the given pattern and the specified behavior Args: pattern: Pattern: A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` behavior: SplitDelimiterBehavior: The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" Returns: A list of NormalizedString, representing each split """ pass def strip(self): """ Strip both ends of the string """ pass def uppercase(self): """ Uppercase the string """ pass class PreTokenizedString: """ PreTokenizedString Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the underlying string, while keeping track of the alignment information (offsets). 
The PreTokenizedString manages what we call `splits`. Each split represents a substring which is a subpart of the original string, with the relevant offsets and tokens. When calling one of the methods used to modify the PreTokenizedString (namely one of `split`, `normalize` or `tokenize), only the `splits` that don't have any associated tokens will get modified. Args: sequence: str: The string sequence used to initialize this PreTokenizedString """ def __init__(self, sequence): pass def get_splits(self, offset_referential="original", offset_type="char"): """ Get the splits currently managed by the PreTokenizedString Args: offset_referential: :obj:`str` Whether the returned splits should have offsets expressed relative to the original string, or the normalized one. choices: "original", "normalized". offset_type: :obj:`str` Whether the returned splits should have offsets expressed in bytes or chars. When slicing an str, we usually want to use chars, which is the default value. Now in some cases it might be interesting to get these offsets expressed in bytes, so it is possible to change this here. choices: "char", "bytes" Returns A list of splits """ pass def normalize(self, func): """ Normalize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[NormalizedString], None]: The function used to normalize each underlying split. This function does not need to return anything, just calling the methods on the provided NormalizedString allow its modification. """ pass def split(self, func): """ Split the PreTokenizedString using the given `func` Args: func: Callable[[index, NormalizedString], List[NormalizedString]]: The function used to split each underlying split. It is expected to return a list of `NormalizedString`, that represent the new splits. If the given `NormalizedString` does not need any splitting, we can just return it directly. In order for the offsets to be tracked accurately, any returned `NormalizedString` should come from calling either `.split` or `.slice` on the received one. """ pass def to_encoding(self, type_id=0, word_idx=None): """ Return an Encoding generated from this PreTokenizedString Args: type_id: int = 0: The type_id to be used on the generated Encoding. word_idx: Optional[int] = None: An optional word index to be used for each token of this Encoding. If provided, all the word indices in the generated Encoding will use this value, instead of the one automatically tracked during pre-tokenization. Returns: An Encoding """ pass def tokenize(self, func): """ Tokenize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[str], List[Token]]: The function used to tokenize each underlying split. This function must return a list of Token generated from the input str. """ pass class Regex: """ Instantiate a new Regex with the given pattern """ def __init__(self, pattern): pass class Token: pass class Tokenizer: """ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input and outputs an :class:`~tokenizers.Encoding`. Args: model (:class:`~tokenizers.models.Model`): The core algorithm that this :obj:`Tokenizer` should be using. """ def __init__(self, model): pass def add_special_tokens(self, tokens): """ Add the given special tokens to the Tokenizer. If these tokens are already part of the vocabulary, it just let the Tokenizer know about them. If they don't exist, the Tokenizer creates them, giving them a new id. 
        These special tokens will never be processed by the model (ie won't be split into
        multiple tokens), and they can be removed from the output when decoding.

        Args:
            tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
                The list of special tokens we want to add to the vocabulary. Each token can either
                be a string or an instance of :class:`~tokenizers.AddedToken` for more customization.

        Returns:
            :obj:`int`: The number of tokens that were created in the vocabulary
        """
        pass
    def add_tokens(self, tokens):
        """
        Add the given tokens to the vocabulary

        The given tokens are added only if they don't already exist in the vocabulary.
        Each token then gets a newly attributed id.

        Args:
            tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
                The list of tokens we want to add to the vocabulary. Each token can be either a
                string or an instance of :class:`~tokenizers.AddedToken` for more customization.

        Returns:
            :obj:`int`: The number of tokens that were created in the vocabulary
        """
        pass
    def decode(self, ids, skip_special_tokens=True):
        """
        Decode the given list of ids back to a string

        This is used to decode anything coming back from a Language Model

        Args:
            ids (A :obj:`List/Tuple` of :obj:`int`):
                The list of ids that we want to decode

            skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
                Whether the special tokens should be removed from the decoded string

        Returns:
            :obj:`str`: The decoded string
        """
        pass
    def decode_batch(self, sequences, skip_special_tokens=True):
        """
        Decode a batch of ids back to their corresponding string

        Args:
            sequences (:obj:`List` of :obj:`List[int]`):
                The batch of sequences we want to decode

            skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
                Whether the special tokens should be removed from the decoded strings

        Returns:
            :obj:`List[str]`: A list of decoded strings
        """
        pass
    @property
    def decoder(self):
        """
        The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
        """
        pass
    def enable_padding(
        self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
    ):
        """
        Enable the padding

        Args:
            direction (:obj:`str`, `optional`, defaults to :obj:`right`):
                The direction in which to pad. Can be either ``right`` or ``left``

            pad_to_multiple_of (:obj:`int`, `optional`):
                If specified, the padding length should always snap to the next multiple of the
                given value. For example if we were going to pad with a length of 250 but
                ``pad_to_multiple_of=8`` then we will pad to 256.

            pad_id (:obj:`int`, defaults to 0):
                The id to be used when padding

            pad_type_id (:obj:`int`, defaults to 0):
                The type id to be used when padding

            pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
                The pad token to be used when padding

            length (:obj:`int`, `optional`):
                If specified, the length at which to pad. If not specified we pad using the size of
                the longest sequence in a batch.
        """
        pass
    def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
        """
        Enable truncation

        Args:
            max_length (:obj:`int`):
                The max length at which to truncate

            stride (:obj:`int`, `optional`):
                The length of the previous first sequence to be included in the overflowing
                sequence

            strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
                The strategy used for truncation. Can be one of ``longest_first``, ``only_first`` or
                ``only_second``.
direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True): """ Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode("A single sequence")` encode("A sequence", "And its pair")` encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` encode( [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], is_pretokenized=True ) Args: sequence (:obj:`~tokenizers.InputSequence`): The main input sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` pair (:obj:`~tokenizers.InputSequence`, `optional`): An optional input sequence. The expected format is the same that for ``sequence``. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The encoded result """ pass def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True): """ Encode the given batch of inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode_batch([ "A single sequence", ("A tuple with a sequence", "And its pair"), [ "A", "pre", "tokenized", "sequence" ], ([ "A", "pre", "tokenized", "sequence" ], "And its pair") ]) Args: input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): A list of single sequences or pair sequences to encode. Each sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch """ pass @property def encode_special_tokens(self): """ Modifies the tokenizer in order to use or not the special tokens during encoding. Args: value (:obj:`bool`): Whether to use the special tokens or not """ pass @staticmethod def from_buffer(buffer): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. Args: buffer (:obj:`bytes`): A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_file(path): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. Args: path (:obj:`str`): A path to a local JSON file representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_pretrained(identifier, revision="main", auth_token=None): """ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the Hugging Face Hub. 
Args: identifier (:obj:`str`): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file revision (:obj:`str`, defaults to `main`): A branch or commit id auth_token (:obj:`str`, `optional`, defaults to `None`): An optional auth token used to access private repositories on the Hugging Face Hub Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_str(json): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. Args: json (:obj:`str`): A valid JSON string representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass def get_added_tokens_decoder(self): """ Get the underlying vocabulary Returns: :obj:`Dict[int, AddedToken]`: The vocabulary """ pass def get_vocab(self, with_added_tokens=True): """ Get the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`Dict[str, int]`: The vocabulary """ pass def get_vocab_size(self, with_added_tokens=True): """ Get the size of the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`int`: The size of the vocabulary """ pass def id_to_token(self, id): """ Convert the given id to its corresponding token if it exists Args: id (:obj:`int`): The id to convert Returns: :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary """ pass @property def model(self): """ The :class:`~tokenizers.models.Model` in use by the Tokenizer """ pass def no_padding(self): """ Disable padding """ pass def no_truncation(self): """ Disable truncation """ pass @property def normalizer(self): """ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer """ pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ pass @property def padding(self): """ Get the current padding parameters `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current padding parameters if padding is enabled """ pass def post_process(self, encoding, pair=None, add_special_tokens=True): """ Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to the set truncation params (provided with :meth:`~tokenizers.Tokenizer.enable_truncation`) 2. Apply the :class:`~tokenizers.processors.PostProcessor` 3. Pad according to the set padding params (provided with :meth:`~tokenizers.Tokenizer.enable_padding`) Args: encoding (:class:`~tokenizers.Encoding`): The :class:`~tokenizers.Encoding` corresponding to the main sequence. pair (:class:`~tokenizers.Encoding`, `optional`): An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. 
add_special_tokens (:obj:`bool`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The final post-processed encoding """ pass @property def post_processor(self): """ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer """ pass @property def pre_tokenizer(self): """ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer """ pass def save(self, path, pretty=True): """ Save the :class:`~tokenizers.Tokenizer` to the file at the given path. Args: path (:obj:`str`): A path to a file in which to save the serialized tokenizer. pretty (:obj:`bool`, defaults to :obj:`True`): Whether the JSON file should be pretty formatted. """ pass def to_str(self, pretty=False): """ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. Args: pretty (:obj:`bool`, defaults to :obj:`False`): Whether the JSON string should be pretty formatted. Returns: :obj:`str`: A string representing the serialized Tokenizer """ pass def token_to_id(self, token): """ Convert the given token to its corresponding id if it exists Args: token (:obj:`str`): The token to convert Returns: :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary """ pass def train(self, files, trainer=None): """ Train the Tokenizer using the given files. Reads the files line by line, while keeping all the whitespace, even new lines. If you want to train from data store in-memory, you can check :meth:`~tokenizers.Tokenizer.train_from_iterator` Args: files (:obj:`List[str]`): A list of path to the files that we should use for training trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model """ pass def train_from_iterator(self, iterator, trainer=None, length=None): """ Train the Tokenizer using the provided iterator. You can provide anything that is a Python Iterator * A list of sequences :obj:`List[str]` * A generator that yields :obj:`str` or :obj:`List[str]` * A Numpy array of strings * ... Args: iterator (:obj:`Iterator`): Any iterator over strings or list of strings trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ pass @property def truncation(self): """ Get the currently set truncation parameters `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current truncation parameters if truncation is enabled """ pass
tokenizers/bindings/python/py_src/tokenizers/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/__init__.pyi", "repo_id": "tokenizers", "token_count": 16502 }
208
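A short usage sketch tying together the methods documented in the stub above. The `bert-base-uncased` identifier is used only as a familiar Hub repository that ships a `tokenizer.json`; any such repo would work, and the printed values are illustrative rather than asserted.

from tokenizers import Tokenizer

tok = Tokenizer.from_pretrained("bert-base-uncased")  # any repo with a tokenizer.json

tok.enable_truncation(max_length=16)
tok.enable_padding(pad_token="[PAD]", pad_id=tok.token_to_id("[PAD]"))

enc = tok.encode("A sentence", "And its pair")
print(enc.tokens)    # string form of the ids
print(enc.type_ids)  # 0 for the first sequence, 1 for the pair
print(enc.word_ids)  # None for special tokens such as [CLS] and [SEP]

print(tok.decode(enc.ids, skip_special_tokens=True))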
# Generated content DO NOT EDIT from .. import processors PostProcessor = processors.PostProcessor BertProcessing = processors.BertProcessing ByteLevel = processors.ByteLevel RobertaProcessing = processors.RobertaProcessing Sequence = processors.Sequence TemplateProcessing = processors.TemplateProcessing
tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py", "repo_id": "tokenizers", "token_count": 74 }
209
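For reference, the post-processors exercised by the node tests earlier in this dump are exposed through this Python module as well. A small sketch follows; the token ids are illustrative and would normally come from the tokenizer's vocabulary.

from tokenizers.processors import BertProcessing, TemplateProcessing

# BERT-style post-processing built from explicit (token, id) pairs.
bert_post = BertProcessing(("[SEP]", 102), ("[CLS]", 101))

# The same behaviour expressed as a template, mirroring the node test above.
template_post = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
)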
#![warn(clippy::all)]
#![allow(clippy::upper_case_acronyms)]
// Many false positives with pyo3 it seems &str, and &PyAny get flagged
#![allow(clippy::borrow_deref_ref)]

extern crate tokenizers as tk;

mod decoders;
mod encoding;
mod error;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod token;
mod tokenizer;
mod trainers;
mod utils;

use pyo3::prelude::*;
use pyo3::wrap_pymodule;

pub const VERSION: &str = env!("CARGO_PKG_VERSION");

// For users using multiprocessing in python, it is quite easy to fork the process running
// tokenizers, ending up with a deadlock because we internally make use of multithreading. So
// we register a callback to be called in the event of a fork so that we can warn the user.
#[cfg(target_family = "unix")]
static mut REGISTERED_FORK_CALLBACK: bool = false;

#[cfg(target_family = "unix")]
extern "C" fn child_after_fork() {
    use tk::parallelism::*;
    if has_parallelism_been_used() && !is_parallelism_configured() {
        eprintln!(
            "huggingface/tokenizers: The current process just got forked, after parallelism has \
             already been used. Disabling parallelism to avoid deadlocks..."
        );
        eprintln!("To disable this warning, you can either:");
        eprintln!(
            "\t- Avoid using `tokenizers` before the fork if possible\n\
             \t- Explicitly set the environment variable {}=(true | false)",
            ENV_VARIABLE
        );
        set_parallelism(false);
    }
}

/// Tokenizers Module
#[pymodule]
pub fn tokenizers(_py: Python, m: &PyModule) -> PyResult<()> {
    let _ = env_logger::try_init_from_env("TOKENIZERS_LOG");

    // Register the fork callback
    #[cfg(target_family = "unix")]
    unsafe {
        if !REGISTERED_FORK_CALLBACK {
            libc::pthread_atfork(None, None, Some(child_after_fork));
            REGISTERED_FORK_CALLBACK = true;
        }
    }

    m.add_class::<tokenizer::PyTokenizer>()?;
    m.add_class::<tokenizer::PyAddedToken>()?;
    m.add_class::<token::PyToken>()?;
    m.add_class::<encoding::PyEncoding>()?;
    m.add_class::<utils::PyRegex>()?;
    m.add_class::<utils::PyNormalizedString>()?;
    m.add_class::<utils::PyPreTokenizedString>()?;
    m.add_wrapped(wrap_pymodule!(models::models))?;
    m.add_wrapped(wrap_pymodule!(pre_tokenizers::pre_tokenizers))?;
    m.add_wrapped(wrap_pymodule!(decoders::decoders))?;
    m.add_wrapped(wrap_pymodule!(processors::processors))?;
    m.add_wrapped(wrap_pymodule!(normalizers::normalizers))?;
    m.add_wrapped(wrap_pymodule!(trainers::trainers))?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    Ok(())
}
tokenizers/bindings/python/src/lib.rs/0
{ "file_path": "tokenizers/bindings/python/src/lib.rs", "repo_id": "tokenizers", "token_count": 1086 }
210
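The fork warning above refers to an environment variable (`ENV_VARIABLE`) defined elsewhere in the crate; assuming it resolves to `TOKENIZERS_PARALLELISM`, a Python caller that forks (for example via `multiprocessing`) can configure parallelism up front and avoid the message:

import os

# Set before `tokenizers` does any multithreaded work; otherwise forked
# children trigger the warning emitted by child_after_fork above.
# TOKENIZERS_PARALLELISM is assumed to be the variable name referenced there.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

from tokenizers import Tokenizer  # imported after the variable is set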
import pytest from tokenizers import ByteLevelBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files class TestByteLevelBPE: def test_basic_encode(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) output = tokenizer.encode("The quick brown fox jumps over the lazy dog") assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "The", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] assert output.offsets == [ (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43), ] def test_add_prefix_space(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file( roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True ) output = tokenizer.encode("The quick brown fox jumps over the lazy dog") assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "ĠThe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] assert output.offsets == [ (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43), ] def test_lowerspace(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file( roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True, ) output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog") assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "Ġthe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] def test_multiprocessing_with_parallelism(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = ByteLevelBPETokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["A", "Ġsentence"]
tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py", "repo_id": "tokenizers", "token_count": 1658 }
211
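A small train-and-reload sketch using the same implementation class the tests above exercise. The corpus and vocabulary size are toy values chosen for illustration.

from tokenizers import ByteLevelBPETokenizer, Tokenizer

texts = ["A first sentence", "Another sentence", "And a last one"]

tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train_from_iterator(texts, vocab_size=300, min_frequency=1, show_progress=False)

tokenizer.save("byte-level-bpe.json")  # serialized like any other tokenizer
reloaded = Tokenizer.from_file("byte-level-bpe.json")
print(reloaded.encode("A sentence").tokens)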
# Pre-tokenizers <tokenizerslangcontent> <python> ## BertPreTokenizer [[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer ## ByteLevel [[autodoc]] tokenizers.pre_tokenizers.ByteLevel ## CharDelimiterSplit [[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit ## Digits [[autodoc]] tokenizers.pre_tokenizers.Digits ## Metaspace [[autodoc]] tokenizers.pre_tokenizers.Metaspace ## PreTokenizer [[autodoc]] tokenizers.pre_tokenizers.PreTokenizer ## Punctuation [[autodoc]] tokenizers.pre_tokenizers.Punctuation ## Sequence [[autodoc]] tokenizers.pre_tokenizers.Sequence ## Split [[autodoc]] tokenizers.pre_tokenizers.Split ## UnicodeScripts [[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts ## Whitespace [[autodoc]] tokenizers.pre_tokenizers.Whitespace ## WhitespaceSplit [[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx", "repo_id": "tokenizers", "token_count": 371 }
212
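To make the listing above concrete, here is a small combination of two of these pre-tokenizers; the sample string and the expected output shown in the comment are illustrative.

from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits, Whitespace

# Split on whitespace/punctuation first, then break numbers into single digits.
pre_tok = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tok.pre_tokenize_str("Call 911 now"))
# Expected: [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('now', (9, 12))]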
The tokenization pipeline ==================================================================================================== When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go through the following pipeline: - :ref:`normalization` - :ref:`pre-tokenization` - :ref:`model` - :ref:`post-processing` We'll see in details what happens during each of those steps in detail, as well as when you want to :ref:`decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize each of those steps to your needs. If you're already familiar with those steps and want to learn by seeing some code, jump to :ref:`our BERT from scratch example <example>`. For the examples that require a :entity:`Tokenizer`, we will use the tokenizer we trained in the :doc:`quicktour`, which you can load with: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_reload_tokenizer :end-before: END pipeline_reload_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 8 .. _normalization: Normalization ---------------------------------------------------------------------------------------------------- Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less random or "cleaner". Common operations include stripping whitespace, removing accented characters or lowercasing all text. If you're familiar with `Unicode normalization <https://unicode.org/reports/tr15>`__, it is also a very common normalization operation applied in most tokenizers. Each normalization operation is represented in the 🤗 Tokenizers library by a :entity:`Normalizer`, and you can combine several of those by using a :entity:`normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization and removing accents as an example: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START setup_normalizer :end-before: END setup_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_setup_normalizer :end-before: END pipeline_setup_normalizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START setup_normalizer :end-before: END setup_normalizer :dedent: 8 You can manually test that normalizer by applying it to any string: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START test_normalizer :end-before: END test_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_test_normalizer :end-before: END pipeline_test_normalizer :dedent: 4 .. only:: node .. 
literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START test_normalizer :end-before: END test_normalizer :dedent: 8 When building a :entity:`Tokenizer`, you can customize its normalizer by just changing the corresponding attribute: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START replace_normalizer :end-before: END replace_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_replace_normalizer :end-before: END pipeline_replace_normalizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START replace_normalizer :end-before: END replace_normalizer :dedent: 8 Of course, if you change the way a tokenizer applies normalization, you should probably retrain it from scratch afterward. .. _pre-tokenization: Pre-Tokenization ---------------------------------------------------------------------------------------------------- Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to what your tokens will be at the end of training. A good way to think of this is that the pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those words. An easy way to pre-tokenize inputs is to split on spaces and punctuations, which is done by the :entity:`pre_tokenizers.Whitespace` pre-tokenizer: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START setup_pre_tokenizer :end-before: END setup_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_setup_pre_tokenizer :end-before: END pipeline_setup_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START setup_pre_tokenizer :end-before: END setup_pre_tokenizer :dedent: 8 The output is a list of tuples, with each tuple containing one word and its span in the original sentence (which is used to determine the final :obj:`offsets` of our :entity:`Encoding`). Note that splitting on punctuation will split contractions like :obj:`"I'm"` in this example. You can combine together any :entity:`PreTokenizer` together. For instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating numbers in their individual digits: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START combine_pre_tokenizer :end-before: END combine_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_combine_pre_tokenizer :end-before: END pipeline_combine_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START combine_pre_tokenizer :end-before: END combine_pre_tokenizer :dedent: 8 As we saw in the :doc:`quicktour`, you can customize the pre-tokenizer of a :entity:`Tokenizer` by just changing the corresponding attribute: .. only:: python .. 
literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
        :language: python
        :start-after: START replace_pre_tokenizer
        :end-before: END replace_pre_tokenizer
        :dedent: 8

.. only:: rust

    .. literalinclude:: ../../tokenizers/tests/documentation.rs
        :language: rust
        :start-after: START pipeline_replace_pre_tokenizer
        :end-before: END pipeline_replace_pre_tokenizer
        :dedent: 4

.. only:: node

    .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
        :language: javascript
        :start-after: START replace_pre_tokenizer
        :end-before: END replace_pre_tokenizer
        :dedent: 8

Of course, if you change the pre-tokenizer, you should probably retrain your tokenizer from scratch
afterward.

.. _model:

The Model
----------------------------------------------------------------------------------------------------

Once the input texts are normalized and pre-tokenized, the :entity:`Tokenizer` applies the model on
the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has
been trained if you are using a pretrained tokenizer).

The role of the model is to split your "words" into tokens, using the rules it has learned. It's
also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the
model.

This model is passed along when initializing the :entity:`Tokenizer` so you already know how to
customize this part. Currently, the 🤗 Tokenizers library supports:

- :entity:`models.BPE`
- :entity:`models.Unigram`
- :entity:`models.WordLevel`
- :entity:`models.WordPiece`

For more details about each model and its behavior, you can check `here <components#models>`__

.. _post-processing:

Post-Processing
----------------------------------------------------------------------------------------------------

Post-processing is the last step of the tokenization pipeline, performing any additional
transformation to the :entity:`Encoding` before it's returned, like adding potential special
tokens.

As we saw in the quick tour, we can customize the post processor of a :entity:`Tokenizer` by
setting the corresponding attribute. For instance, here is how we can post-process to make the
inputs suitable for the BERT model:

.. only:: python

    .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
        :language: python
        :start-after: START setup_processor
        :end-before: END setup_processor
        :dedent: 8

.. only:: rust

    .. literalinclude:: ../../tokenizers/tests/documentation.rs
        :language: rust
        :start-after: START pipeline_setup_processor
        :end-before: END pipeline_setup_processor
        :dedent: 4

.. only:: node

    .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
        :language: javascript
        :start-after: START setup_processor
        :end-before: END setup_processor
        :dedent: 8

Note that, contrary to the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer
after changing its post-processor.

.. _example:

All together: a BERT tokenizer from scratch
----------------------------------------------------------------------------------------------------

Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so
we instantiate a new :entity:`Tokenizer` with this model:

.. only:: python

    .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
        :language: python
        :start-after: START bert_setup_tokenizer
        :end-before: END bert_setup_tokenizer
        :dedent: 8

.. only:: rust

    ..
literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_tokenizer :end-before: END bert_setup_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_tokenizer :end-before: END bert_setup_tokenizer :dedent: 8 Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a unicode normalizer: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_normalizer :end-before: END bert_setup_normalizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_normalizer :end-before: END bert_setup_normalizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_normalizer :end-before: END bert_setup_normalizer :dedent: 8 The pre-tokenizer is just splitting on whitespace and punctuation: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_pre_tokenizer :end-before: END bert_setup_pre_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_pre_tokenizer :end-before: END bert_setup_pre_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_pre_tokenizer :end-before: END bert_setup_pre_tokenizer :dedent: 8 And the post-processing uses the template we saw in the previous section: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_setup_processor :end-before: END bert_setup_processor :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_setup_processor :end-before: END bert_setup_processor :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_setup_processor :end-before: END bert_setup_processor :dedent: 8 We can use this tokenizer and train on it on wikitext like in the :doc:`quicktour`: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_train_tokenizer :end-before: END bert_train_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_train_tokenizer :end-before: END bert_train_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_train_tokenizer :end-before: END bert_train_tokenizer :dedent: 8 .. _decoding: Decoding ---------------------------------------------------------------------------------------------------- .. entities:: python bert_tokenizer :obj:`bert_tokenizer` .. entities:: rust bert_tokenizer :obj:`bert_tokenizer` .. entities:: node bert_tokenizer :obj:`bertTokenizer` On top of encoding the input texts, a :entity:`Tokenizer` also has an API for decoding, that is converting IDs generated by your model back to a text. 
This is done by the methods :entity:`Tokenizer.decode` (for one predicted text) and :entity:`Tokenizer.decode_batch` (for a batch of predictions). The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and remove all special tokens, then join those tokens with spaces: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START test_decoding :end-before: END test_decoding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START pipeline_test_decoding :end-before: END pipeline_test_decoding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START test_decoding :end-before: END test_decoding :dedent: 8 If you used a model that added special characters to represent subtokens of a given "word" (like the :obj:`"##"` in WordPiece) you will need to customize the `decoder` to treat them properly. If we take our previous :entity:`bert_tokenizer` for instance the default decoding will give: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_test_decoding :end-before: END bert_test_decoding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_test_decoding :end-before: END bert_test_decoding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_test_decoding :end-before: END bert_test_decoding :dedent: 8 But by changing it to a proper decoder, we get: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py :language: python :start-after: START bert_proper_decoding :end-before: END bert_proper_decoding :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START bert_proper_decoding :end-before: END bert_proper_decoding :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts :language: javascript :start-after: START bert_proper_decoding :end-before: END bert_proper_decoding :dedent: 8
tokenizers/docs/source/pipeline.rst/0
{ "file_path": "tokenizers/docs/source/pipeline.rst", "repo_id": "tokenizers", "token_count": 6323 }
213
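The guide above pulls its code from test files that are not part of this dump, so the actual snippets may differ in detail. As a rough Python rendition of the BERT pipeline it describes (normalizer, pre-tokenizer, model, post-processor, decoder), under that assumption:

from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers
from tokenizers.models import WordPiece
from tokenizers.normalizers import NFD, Lowercase, StripAccents
from tokenizers.processors import TemplateProcessing
from tokenizers.trainers import WordPieceTrainer

bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
bert_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
bert_tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # ids follow the trainer's special_tokens order
)
bert_tokenizer.decoder = decoders.WordPiece()

trainer = WordPieceTrainer(
    vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
)
# files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ("train", "valid", "test")]
# bert_tokenizer.train(files, trainer)  # paths are placeholders for a local wikitext copy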
[package] name = "unstable_wasm" version = "0.1.0" authors = ["Nicolas Patry"] edition = "2018" [lib] crate-type = ["cdylib", "rlib"] [features] default = ["console_error_panic_hook"] [dependencies] wasm-bindgen = "0.2.63" # The `console_error_panic_hook` crate provides better debugging of panics by # logging them with `console.error`. This is great for development, but requires # all the `std::fmt` and `std::panicking` infrastructure, so isn't great for # code size when deploying. console_error_panic_hook = { version = "0.1.6", optional = true } # `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size # compared to the default allocator's ~10K. It is slower than the default # allocator, however. # # Unfortunately, `wee_alloc` requires nightly Rust when targeting wasm for now. wee_alloc = { version = "0.4.5", optional = true } tokenizers = { path = "../../", default-features=false, features = ["unstable_wasm"]} [dev-dependencies] wasm-bindgen-test = "0.3.13" [profile.release] # Tell `rustc` to optimize for small code size. opt-level = "s"
tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml", "repo_id": "tokenizers", "token_count": 364 }
214