Dataset columns: text (string, lengths 96 to 319k), id (string, lengths 14 to 178), metadata (dict).
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_stable_diffusion_adapter"] = ["StableDiffusionAdapterPipeline"]
    _import_structure["pipeline_stable_diffusion_xl_adapter"] = ["StableDiffusionXLAdapterPipeline"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
    else:
        from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline
        from .pipeline_stable_diffusion_xl_adapter import StableDiffusionXLAdapterPipeline

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py", "repo_id": "diffusers", "token_count": 602 }
import math from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin from ...models.attention import FeedForward from ...models.attention_processor import Attention from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed from ...models.modeling_outputs import Transformer2DModelOutput from ...models.normalization import AdaLayerNorm from ...utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _no_grad_trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if (mean < a - 2 * std) or (mean > b + 2 * std): logger.warning( "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect." ) with torch.no_grad(): # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * l - 1, 2 * u - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): # type: (torch.Tensor, float, float, float, float) -> torch.Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. 
Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ return _no_grad_trunc_normal_(tensor, mean, std, a, b) class PatchEmbed(nn.Module): """2D Image to Patch Embedding""" def __init__( self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, layer_norm=False, flatten=True, bias=True, use_pos_embed=True, ): super().__init__() num_patches = (height // patch_size) * (width // patch_size) self.flatten = flatten self.layer_norm = layer_norm self.proj = nn.Conv2d( in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias ) if layer_norm: self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) else: self.norm = None self.use_pos_embed = use_pos_embed if self.use_pos_embed: pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5), output_type="pt") self.register_buffer("pos_embed", pos_embed.float().unsqueeze(0), persistent=False) def forward(self, latent): latent = self.proj(latent) if self.flatten: latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC if self.layer_norm: latent = self.norm(latent) if self.use_pos_embed: return latent + self.pos_embed else: return latent class SkipBlock(nn.Module): def __init__(self, dim: int): super().__init__() self.skip_linear = nn.Linear(2 * dim, dim) # Use torch.nn.LayerNorm for now, following the original code self.norm = nn.LayerNorm(dim) def forward(self, x, skip): x = self.skip_linear(torch.cat([x, skip], dim=-1)) x = self.norm(x) return x # Modified to support both pre-LayerNorm and post-LayerNorm configurations # Don't support AdaLayerNormZero for now # Modified from diffusers.models.attention.BasicTransformerBlock class UTransformerBlock(nn.Module): r""" A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (:obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. attention_bias (:obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float32 when performing the attention calculation. norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. norm_type (`str`, defaults to `"layer_norm"`): The layer norm implementation to use. 
pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g. `pre_layer_norm = True`. final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", pre_layer_norm: bool = True, final_dropout: bool = False, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.pre_layer_norm = pre_layer_norm if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # 1. Self-Attn self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.attn2 = None if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) ) else: self.norm2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) def forward( self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ): # Pre-LayerNorm if self.pre_layer_norm: if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) else: norm_hidden_states = self.norm1(hidden_states) else: norm_hidden_states = hidden_states # 1. 
Self-Attention cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) # Post-LayerNorm if not self.pre_layer_norm: if self.use_ada_layer_norm: attn_output = self.norm1(attn_output, timestep) else: attn_output = self.norm1(attn_output) hidden_states = attn_output + hidden_states if self.attn2 is not None: # Pre-LayerNorm if self.pre_layer_norm: norm_hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) else: norm_hidden_states = hidden_states # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly # prepare attention mask here # 2. Cross-Attention attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) # Post-LayerNorm if not self.pre_layer_norm: attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output) hidden_states = attn_output + hidden_states # 3. Feed-forward # Pre-LayerNorm if self.pre_layer_norm: norm_hidden_states = self.norm3(hidden_states) else: norm_hidden_states = hidden_states ff_output = self.ff(norm_hidden_states) # Post-LayerNorm if not self.pre_layer_norm: ff_output = self.norm3(ff_output) hidden_states = ff_output + hidden_states return hidden_states # Like UTransformerBlock except with LayerNorms on the residual backbone of the block # Modified from diffusers.models.attention.BasicTransformerBlock class UniDiffuserBlock(nn.Module): r""" A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104). Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (:obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. attention_bias (:obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float() when performing the attention calculation. norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. norm_type (`str`, defaults to `"layer_norm"`): The layer norm implementation to use. 
pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm (`pre_layer_norm = False`). final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", pre_layer_norm: bool = False, final_dropout: bool = True, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.pre_layer_norm = pre_layer_norm if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # 1. Self-Attn self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.attn2 = None if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) ) else: self.norm2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) def forward( self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ): # Following the diffusers transformer block implementation, put the LayerNorm on the # residual backbone # Pre-LayerNorm if self.pre_layer_norm: if self.use_ada_layer_norm: hidden_states = self.norm1(hidden_states, timestep) else: hidden_states = self.norm1(hidden_states) # 1. 
Self-Attention cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} attn_output = self.attn1( hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # Following the diffusers transformer block implementation, put the LayerNorm on the # residual backbone # Post-LayerNorm if not self.pre_layer_norm: if self.use_ada_layer_norm: hidden_states = self.norm1(hidden_states, timestep) else: hidden_states = self.norm1(hidden_states) if self.attn2 is not None: # Pre-LayerNorm if self.pre_layer_norm: hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly # prepare attention mask here # 2. Cross-Attention attn_output = self.attn2( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # Post-LayerNorm if not self.pre_layer_norm: hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) # 3. Feed-forward # Pre-LayerNorm if self.pre_layer_norm: hidden_states = self.norm3(hidden_states) ff_output = self.ff(hidden_states) hidden_states = ff_output + hidden_states # Post-LayerNorm if not self.pre_layer_norm: hidden_states = self.norm3(hidden_states) return hidden_states # Modified from diffusers.models.transformer_2d.Transformer2DModel # Modify the transformer block structure to be U-Net like following U-ViT # Only supports patch-style input and torch.nn.LayerNorm currently # https://github.com/baofff/U-ViT class UTransformer2DModel(ModelMixin, ConfigMixin): """ Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion, similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`] layer and then reshaped to (b, t, d). Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): Pass if the input is continuous. The number of channels in the input. out_channels (`int`, *optional*): The number of output channels; if `None`, defaults to `in_channels`. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups to use when performing Group Normalization. cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. attention_bias (`bool`, *optional*): Configure if the TransformerBlocks' attention should contain a bias parameter. sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`. num_vector_embeds (`int`, *optional*): Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. 
Includes the class for the masked latent pixel. patch_size (`int`, *optional*, defaults to 2): The patch size to use in the patch embedding. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during training. Note that this is fixed at training time as it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more than steps than `num_embeds_ada_norm`. use_linear_projection (int, *optional*): TODO: Not used only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used in each transformer block. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float() when performing the attention calculation. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. block_type (`str`, *optional*, defaults to `"unidiffuser"`): The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard behavior in `diffusers`.) pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm (`pre_layer_norm = False`). norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. use_patch_pos_embed (`bool`, *optional*): Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. """ @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = 2, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", block_type: str = "unidiffuser", pre_layer_norm: bool = False, norm_elementwise_affine: bool = True, use_patch_pos_embed=False, ff_final_dropout: bool = False, ): super().__init__() self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim # 1. Input # Only support patch input of shape (batch_size, num_channels, height, width) for now assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size." assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size" # 2. 
Define input layers self.height = sample_size self.width = sample_size self.patch_size = patch_size self.pos_embed = PatchEmbed( height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, use_pos_embed=use_patch_pos_embed, ) # 3. Define transformers blocks # Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block, # and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in # a "U"-shaped fashion (e.g. first in_block to last out_block, etc.). # Quick hack to make the transformer block type configurable if block_type == "unidiffuser": block_cls = UniDiffuserBlock else: block_cls = UTransformerBlock self.transformer_in_blocks = nn.ModuleList( [ block_cls( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout, ) for d in range(num_layers // 2) ] ) self.transformer_mid_block = block_cls( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout, ) # For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs # before each transformer out_block. self.transformer_out_blocks = nn.ModuleList( [ nn.ModuleDict( { "skip": SkipBlock( inner_dim, ), "block": block_cls( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout, ), } ) for d in range(num_layers // 2) ] ) # 4. Define output layers self.out_channels = in_channels if out_channels is None else out_channels # Following the UniDiffuser U-ViT implementation, we process the transformer output with # a LayerNorm layer with per-element affine params self.norm_out = nn.LayerNorm(inner_dim) def forward( self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, cross_attention_kwargs=None, return_dict: bool = True, hidden_states_is_embedding: bool = False, unpatchify: bool = True, ): """ Args: hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. When continuous, `torch.Tensor` of shape `(batch size, channel, height, width)`): Input hidden_states encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.long`, *optional*): Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. 
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels conditioning. cross_attention_kwargs (*optional*): Keyword arguments to supply to the cross attention layers, if used. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. hidden_states_is_embedding (`bool`, *optional*, defaults to `False`): Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the transformer blocks. unpatchify (`bool`, *optional*, defaults to `True`): Whether to unpatchify the transformer output. Returns: [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ # 0. Check inputs if not unpatchify and return_dict: raise ValueError( f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when" f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)" " rather than (batch_size, num_channels, height, width)." ) # 1. Input if not hidden_states_is_embedding: hidden_states = self.pos_embed(hidden_states) # 2. Blocks # In ("downsample") blocks skips = [] for in_block in self.transformer_in_blocks: hidden_states = in_block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) skips.append(hidden_states) # Mid block hidden_states = self.transformer_mid_block(hidden_states) # Out ("upsample") blocks for out_block in self.transformer_out_blocks: hidden_states = out_block["skip"](hidden_states, skips.pop()) hidden_states = out_block["block"]( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output # Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic hidden_states = self.norm_out(hidden_states) # hidden_states = self.proj_out(hidden_states) if unpatchify: # unpatchify height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) else: output = hidden_states if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) class UniDiffuserModel(ModelMixin, ConfigMixin): """ Transformer model for a image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details). Parameters: text_dim (`int`): The hidden dimension of the CLIP text model used to embed images. clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed prompts. num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. 
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): Pass if the input is continuous. The number of channels in the input. out_channels (`int`, *optional*): The number of output channels; if `None`, defaults to `in_channels`. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups to use when performing Group Normalization. cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. attention_bias (`bool`, *optional*): Configure if the TransformerBlocks' attention should contain a bias parameter. sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`. num_vector_embeds (`int`, *optional*): Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked latent pixel. patch_size (`int`, *optional*, defaults to 2): The patch size to use in the patch embedding. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during training. Note that this is fixed at training time as it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more than steps than `num_embeds_ada_norm`. use_linear_projection (int, *optional*): TODO: Not used only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used in each transformer block. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float32 when performing the attention calculation. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. block_type (`str`, *optional*, defaults to `"unidiffuser"`): The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard behavior in `diffusers`.) pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm (`pre_layer_norm = False`). norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. use_patch_pos_embed (`bool`, *optional*): Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). ff_final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. use_data_type_embedding (`bool`, *optional*): Whether to use a data type embedding. 
This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1 is continue-trained from UniDiffuser-v0 on non-publically-available data and accepts a `data_type` argument, which can either be `1` to use the weights trained on non-publically-available data or `0` otherwise. This argument is subsequently embedded by the data type embedding, if used. """ @register_to_config def __init__( self, text_dim: int = 768, clip_img_dim: int = 512, num_text_tokens: int = 77, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", block_type: str = "unidiffuser", pre_layer_norm: bool = False, use_timestep_embedding=False, norm_elementwise_affine: bool = True, use_patch_pos_embed=False, ff_final_dropout: bool = True, use_data_type_embedding: bool = False, ): super().__init__() # 0. Handle dimensions self.inner_dim = num_attention_heads * attention_head_dim assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size" self.sample_size = sample_size self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.patch_size = patch_size # Assume image is square... self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size) # 1. Define input layers # 1.1 Input layers for text and image input # For now, only support patch input for VAE latent image input self.vae_img_in = PatchEmbed( height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=self.inner_dim, use_pos_embed=use_patch_pos_embed, ) self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim) self.text_in = nn.Linear(text_dim, self.inner_dim) # 1.2. Timestep embeddings for t_img, t_text self.timestep_img_proj = Timesteps( self.inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0, ) self.timestep_img_embed = ( TimestepEmbedding( self.inner_dim, 4 * self.inner_dim, out_dim=self.inner_dim, ) if use_timestep_embedding else nn.Identity() ) self.timestep_text_proj = Timesteps( self.inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0, ) self.timestep_text_embed = ( TimestepEmbedding( self.inner_dim, 4 * self.inner_dim, out_dim=self.inner_dim, ) if use_timestep_embedding else nn.Identity() ) # 1.3. Positional embedding self.num_text_tokens = num_text_tokens self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim)) self.pos_embed_drop = nn.Dropout(p=dropout) trunc_normal_(self.pos_embed, std=0.02) # 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary self.use_data_type_embedding = use_data_type_embedding if self.use_data_type_embedding: self.data_type_token_embedding = nn.Embedding(2, self.inner_dim) self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim)) # 2. 
Define transformer blocks self.transformer = UTransformer2DModel( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, out_channels=out_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, patch_size=patch_size, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, block_type=block_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, use_patch_pos_embed=use_patch_pos_embed, ff_final_dropout=ff_final_dropout, ) # 3. Define output layers patch_dim = (patch_size**2) * out_channels self.vae_img_out = nn.Linear(self.inner_dim, patch_dim) self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim) self.text_out = nn.Linear(self.inner_dim, text_dim) @torch.jit.ignore def no_weight_decay(self): return {"pos_embed"} def forward( self, latent_image_embeds: torch.Tensor, image_embeds: torch.Tensor, prompt_embeds: torch.Tensor, timestep_img: Union[torch.Tensor, float, int], timestep_text: Union[torch.Tensor, float, int], data_type: Optional[Union[torch.Tensor, float, int]] = 1, encoder_hidden_states=None, cross_attention_kwargs=None, ): """ Args: latent_image_embeds (`torch.Tensor` of shape `(batch size, latent channels, height, width)`): Latent image representation from the VAE encoder. image_embeds (`torch.Tensor` of shape `(batch size, 1, clip_img_dim)`): CLIP-embedded image representation (unsqueezed in the first dimension). prompt_embeds (`torch.Tensor` of shape `(batch size, seq_len, text_dim)`): CLIP-embedded text representation. timestep_img (`torch.long` or `float` or `int`): Current denoising step for the image. timestep_text (`torch.long` or `float` or `int`): Current denoising step for the text. data_type: (`torch.int` or `float` or `int`, *optional*, defaults to `1`): Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data, or `0` otherwise. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. cross_attention_kwargs (*optional*): Keyword arguments to supply to the cross attention layers, if used. Returns: `tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is tbe VAE image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text embedding. """ batch_size = latent_image_embeds.shape[0] # 1. Input # 1.1. Map inputs to shape (B, N, inner_dim) vae_hidden_states = self.vae_img_in(latent_image_embeds) clip_hidden_states = self.clip_img_in(image_embeds) text_hidden_states = self.text_in(prompt_embeds) num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1) # 1.2. 
Encode image timesteps to single token (B, 1, inner_dim) if not torch.is_tensor(timestep_img): timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device) timestep_img_token = self.timestep_img_proj(timestep_img) # t_img_token does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. timestep_img_token = timestep_img_token.to(dtype=self.dtype) timestep_img_token = self.timestep_img_embed(timestep_img_token) timestep_img_token = timestep_img_token.unsqueeze(dim=1) # 1.3. Encode text timesteps to single token (B, 1, inner_dim) if not torch.is_tensor(timestep_text): timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device) timestep_text_token = self.timestep_text_proj(timestep_text) # t_text_token does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. timestep_text_token = timestep_text_token.to(dtype=self.dtype) timestep_text_token = self.timestep_text_embed(timestep_text_token) timestep_text_token = timestep_text_token.unsqueeze(dim=1) # 1.4. Concatenate all of the embeddings together. if self.use_data_type_embedding: assert data_type is not None, "data_type must be supplied if the model uses a data type embedding" if not torch.is_tensor(data_type): data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device) data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1) hidden_states = torch.cat( [ timestep_img_token, timestep_text_token, data_type_token, text_hidden_states, clip_hidden_states, vae_hidden_states, ], dim=1, ) else: hidden_states = torch.cat( [timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states], dim=1, ) # 1.5. Prepare the positional embeddings and add to hidden states # Note: I think img_vae should always have the proper shape, so there's no need to interpolate # the position embeddings. if self.use_data_type_embedding: pos_embed = torch.cat( [self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1 ) else: pos_embed = self.pos_embed hidden_states = hidden_states + pos_embed hidden_states = self.pos_embed_drop(hidden_states) # 2. Blocks hidden_states = self.transformer( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=None, class_labels=None, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, hidden_states_is_embedding=True, unpatchify=False, )[0] # 3. Output # Split out the predicted noise representation. 
if self.use_data_type_embedding: ( t_img_token_out, t_text_token_out, data_type_token_out, text_out, img_clip_out, img_vae_out, ) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1) else: t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split( (1, 1, num_text_tokens, 1, num_img_tokens), dim=1 ) img_vae_out = self.vae_img_out(img_vae_out) # unpatchify height = width = int(img_vae_out.shape[1] ** 0.5) img_vae_out = img_vae_out.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out) img_vae_out = img_vae_out.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) img_clip_out = self.clip_img_out(img_clip_out) text_out = self.text_out(text_out) return img_vae_out, img_clip_out, text_out
diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py", "repo_id": "diffusers", "token_count": 24169 }
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Adapted from https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/integrations/bitsandbytes.py """ import inspect from inspect import signature from typing import Union from ...utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging from ..quantization_config import QuantizationMethod if is_torch_available(): import torch import torch.nn as nn if is_bitsandbytes_available(): import bitsandbytes as bnb if is_accelerate_available(): import accelerate from accelerate import init_empty_weights from accelerate.hooks import add_hook_to_module, remove_hook_from_module logger = logging.get_logger(__name__) def _replace_with_bnb_linear( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any( (key + "." 
in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert ): with init_empty_weights(): in_features = module.in_features out_features = module.out_features if quantization_config.quantization_method() == "llm_int8": model._modules[name] = bnb.nn.Linear8bitLt( in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, ) has_been_replaced = True else: if ( quantization_config.llm_int8_skip_modules is not None and name in quantization_config.llm_int8_skip_modules ): pass else: extra_kwargs = ( {"quant_storage": quantization_config.bnb_4bit_quant_storage} if "quant_storage" in list(signature(bnb.nn.Linear4bit).parameters) else {} ) model._modules[name] = bnb.nn.Linear4bit( in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, **extra_kwargs, ) has_been_replaced = True # Store the module class in case we need to transpose the weight later model._modules[name].source_cls = type(module) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = _replace_with_bnb_linear( module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None): """ Helper function to replace the `nn.Linear` layers within `model` with either `bnb.nn.Linear8bit` or `bnb.nn.Linear4bit` using the `bitsandbytes` library. References: * `bnb.nn.Linear8bit`: [LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale](https://arxiv.org/abs/2208.07339) * `bnb.nn.Linear4bit`: [QLoRA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/abs/2305.14314) Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[`str`]`, *optional*, defaults to `[]`): Names of the modules to not convert in `Linear8bitLt`. In practice we keep the `modules_to_not_convert` in full precision for numerical stability reasons. current_key_name (`List[`str`]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert (for instances modules that are offloaded to `cpu` or `disk`). quantization_config ('transformers.utils.quantization_config.BitsAndBytesConfig'): To configure and manage settings related to quantization, a technique used to compress neural network models by reducing the precision of the weights and activations, thus making models more efficient in terms of both storage and computation. """ model, has_been_replaced = _replace_with_bnb_linear( model, modules_to_not_convert, current_key_name, quantization_config ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." 
) return model # Adapted from PEFT: https://github.com/huggingface/peft/blob/6d458b300fc2ed82e19f796b53af4c97d03ea604/src/peft/utils/integrations.py#L81 def dequantize_bnb_weight(weight: "torch.nn.Parameter", state=None, dtype: "torch.dtype" = None): """ Helper function to dequantize 4bit or 8bit bnb weights. If the weight is not a bnb quantized weight, it will be returned as is. """ if not isinstance(weight, torch.nn.Parameter): raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead") cls_name = weight.__class__.__name__ if cls_name not in ("Params4bit", "Int8Params"): return weight if cls_name == "Params4bit": output_tensor = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) logger.warning_once( f"The model is going to be dequantized in {output_tensor.dtype} - if you want to upcast it to another dtype, make sure to pass the desired dtype when quantizing the model through `bnb_4bit_quant_type` argument of `BitsAndBytesConfig`" ) return output_tensor if state.SCB is None: state.SCB = weight.SCB if hasattr(bnb.functional, "int8_vectorwise_dequant"): # Use bitsandbytes API if available (requires v0.45.0+) dequantized = bnb.functional.int8_vectorwise_dequant(weight.data, state.SCB) else: # Multiply by (scale/127) to dequantize. dequantized = weight.data * state.SCB.view(-1, 1) * 7.874015718698502e-3 if dtype: dequantized = dequantized.to(dtype) return dequantized def _create_accelerate_new_hook(old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! This method is a copy of: https://github.com/huggingface/peft/blob/748f7968f3a31ec06a1c2b0328993319ad9a150a/src/peft/utils/other.py#L245 with some changes """ old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def _dequantize_and_replace( model, dtype, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False, ): """ Converts a quantized model into its dequantized original version. The newly converted model will have some performance drop compared to the original model before quantization - use it only for specific usecases such as QLoRA adapters merging. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ quant_method = quantization_config.quantization_method() target_cls = bnb.nn.Linear8bitLt if quant_method == "llm_int8" else bnb.nn.Linear4bit for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, target_cls) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any( (key + "." 
in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert ): bias = getattr(module, "bias", None) device = module.weight.device with init_empty_weights(): new_module = torch.nn.Linear(module.in_features, module.out_features, bias=bias is not None) if quant_method == "llm_int8": state = module.state else: state = None new_module.weight = torch.nn.Parameter(dequantize_bnb_weight(module.weight, state, dtype)) if bias is not None: new_module.bias = bias # Create a new hook and attach it in case we use accelerate if hasattr(module, "_hf_hook"): old_hook = module._hf_hook new_hook = _create_accelerate_new_hook(old_hook) remove_hook_from_module(module) add_hook_to_module(new_module, new_hook) new_module.to(device) model._modules[name] = new_module has_been_replaced = True if len(list(module.children())) > 0: _, has_been_replaced = _dequantize_and_replace( module, dtype=dtype, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, quantization_config=quantization_config, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def dequantize_and_replace( model, modules_to_not_convert=None, quantization_config=None, ): model, has_been_replaced = _dequantize_and_replace( model, dtype=model.dtype, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config, ) if not has_been_replaced: logger.warning( "For some reason the model has not been properly dequantized. You might see unexpected behavior." ) return model def _check_bnb_status(module) -> Union[bool, bool]: is_loaded_in_4bit_bnb = ( hasattr(module, "is_loaded_in_4bit") and module.is_loaded_in_4bit and getattr(module, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES ) is_loaded_in_8bit_bnb = ( hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit and getattr(module, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES ) return is_loaded_in_4bit_bnb or is_loaded_in_8bit_bnb, is_loaded_in_4bit_bnb, is_loaded_in_8bit_bnb
diffusers/src/diffusers/quantizers/bitsandbytes/utils.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/bitsandbytes/utils.py", "repo_id": "diffusers", "token_count": 5920 }
# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, ) @flax.struct.dataclass class PNDMSchedulerState: common: CommonSchedulerState final_alpha_cumprod: jnp.ndarray # setable values init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None prk_timesteps: Optional[jnp.ndarray] = None plms_timesteps: Optional[jnp.ndarray] = None # running values cur_model_output: Optional[jnp.ndarray] = None counter: Optional[jnp.int32] = None cur_sample: Optional[jnp.ndarray] = None ets: Optional[jnp.ndarray] = None @classmethod def create( cls, common: CommonSchedulerState, final_alpha_cumprod: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, ): return cls( common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) @dataclass class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput): state: PNDMSchedulerState class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin): """ Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques, namely Runge-Kutta method and a linear multi-step method. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details, see the original paper: https://arxiv.org/abs/2202.09778 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`jnp.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. skip_prk_steps (`bool`): allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required before plms steps; defaults to `False`. set_alpha_to_one (`bool`, default `False`): each diffusion step uses the value of alphas product at that step and at the previous one. For the final step there is no previous alpha. 
When this option is `True` the previous alpha product is fixed to `1`, otherwise it uses the value of alpha at step 0. steps_offset (`int`, default `0`): An offset added to the inference steps, as required by some model families. prediction_type (`str`, default `epsilon`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): the `dtype` used for params and computation. """ _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype pndm_order: int @property def has_state(self): return True @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, skip_prk_steps: bool = False, set_alpha_to_one: bool = False, steps_offset: int = 0, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. self.pndm_order = 4 def create_state(self, common: Optional[CommonSchedulerState] = None) -> PNDMSchedulerState: if common is None: common = CommonSchedulerState.create(self) # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0 # `set_alpha_to_one` decides whether we set this parameter simply to one or # whether we use the final alpha of the "non-previous" one. final_alpha_cumprod = ( jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] ) # standard deviation of the initial noise distribution init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return PNDMSchedulerState.create( common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState: """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. shape (`Tuple`): the shape of the samples to be generated. """ step_ratio = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset if self.config.skip_prk_steps: # for some models like stable diffusion the prk steps can/should be skipped to # produce better results. 
When using PNDM with `self.config.skip_prk_steps` the implementation # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 prk_timesteps = jnp.array([], dtype=jnp.int32) plms_timesteps = jnp.concatenate([_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]])[::-1] else: prk_timesteps = _timesteps[-self.pndm_order :].repeat(2) + jnp.tile( jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2], dtype=jnp.int32), self.pndm_order, ) prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1] plms_timesteps = _timesteps[:-3][::-1] timesteps = jnp.concatenate([prk_timesteps, plms_timesteps]) # initial running values cur_model_output = jnp.zeros(shape, dtype=self.dtype) counter = jnp.int32(0) cur_sample = jnp.zeros(shape, dtype=self.dtype) ets = jnp.zeros((4,) + shape, dtype=self.dtype) return state.replace( timesteps=timesteps, num_inference_steps=num_inference_steps, prk_timesteps=prk_timesteps, plms_timesteps=plms_timesteps, cur_model_output=cur_model_output, counter=counter, cur_sample=cur_sample, ets=ets, ) def scale_model_input( self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None ) -> jnp.ndarray: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. sample (`jnp.ndarray`): input sample timestep (`int`, optional): current timestep Returns: `jnp.ndarray`: scaled input sample """ return sample def step( self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`. Args: state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class Returns: [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" if state.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.config.skip_prk_steps: prev_sample, state = self.step_plms(state, model_output, timestep, sample) else: prk_prev_sample, prk_state = self.step_prk(state, model_output, timestep, sample) plms_prev_sample, plms_state = self.step_plms(state, model_output, timestep, sample) cond = state.counter < len(state.prk_timesteps) prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample) state = state.replace( cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output), ets=jax.lax.select(cond, prk_state.ets, plms_state.ets), cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample), counter=jax.lax.select(cond, prk_state.counter, plms_state.counter), ) if not return_dict: return (prev_sample, state) return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state) def step_prk( self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: """ Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the solution to the differential equation. Args: state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class Returns: [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ if state.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) diff_to_prev = jnp.where( state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2 ) prev_timestep = timestep - diff_to_prev timestep = state.prk_timesteps[state.counter // 4 * 4] model_output = jax.lax.select( (state.counter % 4) != 3, model_output, # remainder 0, 1, 2 state.cur_model_output + 1 / 6 * model_output, # remainder 3 ) state = state.replace( cur_model_output=jax.lax.select_n( state.counter % 4, state.cur_model_output + 1 / 6 * model_output, # remainder 0 state.cur_model_output + 1 / 3 * model_output, # remainder 1 state.cur_model_output + 1 / 3 * model_output, # remainder 2 jnp.zeros_like(state.cur_model_output), # remainder 3 ), ets=jax.lax.select( (state.counter % 4) == 0, state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # remainder 0 state.ets, # remainder 1, 2, 3 ), cur_sample=jax.lax.select( (state.counter % 4) == 0, sample, # remainder 0 state.cur_sample, # remainder 1, 2, 3 ), ) cur_sample = state.cur_sample prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output) state = state.replace(counter=state.counter + 1) return (prev_sample, state) def step_plms( self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: """ Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple times to approximate the solution. 
Args: state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class Returns: [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ if state.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # NOTE: There is no way to check in the jitted runtime if the prk mode was ran before prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0) # Reference: # if state.counter != 1: # state.ets.append(model_output) # else: # prev_timestep = timestep # timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep) timestep = jnp.where( state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep ) # Reference: # if len(state.ets) == 1 and state.counter == 0: # model_output = model_output # state.cur_sample = sample # elif len(state.ets) == 1 and state.counter == 1: # model_output = (model_output + state.ets[-1]) / 2 # sample = state.cur_sample # state.cur_sample = None # elif len(state.ets) == 2: # model_output = (3 * state.ets[-1] - state.ets[-2]) / 2 # elif len(state.ets) == 3: # model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12 # else: # model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]) state = state.replace( ets=jax.lax.select( state.counter != 1, state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # counter != 1 state.ets, # counter 1 ), cur_sample=jax.lax.select( state.counter != 1, sample, # counter != 1 state.cur_sample, # counter 1 ), ) state = state.replace( cur_model_output=jax.lax.select_n( jnp.clip(state.counter, 0, 4), model_output, # counter 0 (model_output + state.ets[-1]) / 2, # counter 1 (3 * state.ets[-1] - state.ets[-2]) / 2, # counter 2 (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12, # counter 3 (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]), # counter >= 4 ), ) sample = state.cur_sample model_output = state.cur_model_output prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output) state = state.replace(counter=state.counter + 1) return (prev_sample, state) def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output): # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf # this function computes x_(t−δ) using the formula of (9) # Note that x_t needs to be added to both sides of the equation # Notation (<variable name> -> <name in paper> # alpha_prod_t -> α_t # alpha_prod_t_prev -> α_(t−δ) # beta_prod_t -> (1 - α_t) # beta_prod_t_prev -> (1 - α_(t−δ)) # sample -> x_t # model_output -> e_θ(x_t, t) # prev_sample -> x_(t−δ) alpha_prod_t = state.common.alphas_cumprod[timestep] alpha_prod_t_prev = jnp.where( prev_timestep >= 0, 
state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if self.config.prediction_type == "v_prediction":
            model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        elif self.config.prediction_type != "epsilon":
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`"
            )

        # corresponds to (α_(t−δ) - α_t) divided by the
        # denominator of x_t in formula (9), plus 1
        # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqrt(α_t))) + 1 =
        # sqrt(α_(t−δ)) / sqrt(α_t)
        sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)

        # corresponds to denominator of e_θ(x_t, t) in formula (9)
        model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
            alpha_prod_t * beta_prod_t * alpha_prod_t_prev
        ) ** (0.5)

        # full formula (9)
        prev_sample = (
            sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
        )

        return prev_sample

    def add_noise(
        self,
        state: PNDMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
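For reference, the previous-sample update that `_get_prev_sample` implements above (formula (9) of the PNDM paper) can be written out explicitly. Here each α denotes a cumulative alpha product (`alphas_cumprod`), x_t is the current sample, and e_θ(x_t, t) is the model output; the leading coefficient is `sample_coeff` and the denominator of the second term is `model_output_denom_coeff` in the code:

\[
x_{t-\delta}
  = \sqrt{\tfrac{\alpha_{t-\delta}}{\alpha_t}}\, x_t
  \;-\; \frac{(\alpha_{t-\delta} - \alpha_t)\, e_\theta(x_t, t)}
             {\alpha_t \sqrt{1 - \alpha_{t-\delta}} + \sqrt{\alpha_t\, (1 - \alpha_t)\, \alpha_{t-\delta}}}
\]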
diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py", "repo_id": "diffusers", "token_count": 9597 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Doc utilities: Utilities related to documentation """ import re def replace_example_docstring(example_docstring): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None: i += 1 if i < len(lines): lines[i] = example_docstring func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator
diffusers/src/diffusers/utils/doc_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/doc_utils.py", "repo_id": "diffusers", "token_count": 506 }
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import sys import tempfile import warnings from pathlib import Path from typing import Dict, List, Optional, Union from uuid import uuid4 from huggingface_hub import ( DDUFEntry, ModelCard, ModelCardData, create_repo, hf_hub_download, model_info, snapshot_download, upload_folder, ) from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, validate_hf_hub_args, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger logger = get_logger(__name__) MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md" SESSION_ID = uuid4().hex def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_flax_available(): ua += f"; jax/{_jax_version}" ua += f"; flax/{_flax_version}" if is_onnx_available(): ua += f"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua def load_or_create_model_card( repo_id_or_path: str = None, token: Optional[str] = None, is_pipeline: bool = False, from_training: bool = False, model_description: Optional[str] = None, base_model: str = None, prompt: Optional[str] = None, license: Optional[str] = None, widget: Optional[List[dict]] = None, inference: Optional[bool] = None, ) -> ModelCard: """ Loads or creates a model card. Args: repo_id_or_path (`str`): The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card. token (`str`, *optional*): Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more details. is_pipeline (`bool`): Boolean to indicate if we're adding tag to a [`DiffusionPipeline`]. from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script. model_description (`str`, *optional*): Model description to add to the model card. 
Helpful when using `load_or_create_model_card` from a training script. base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful for DreamBooth-like training. prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training. license: (`str`, *optional*): License of the output artifact. Helpful when using `load_or_create_model_card` from a training script. widget (`List[dict]`, *optional*): Widget to accompany a gallery template. inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using `load_or_create_model_card` from a training script. """ if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `load_or_create_model_card`." " To install it, please run `pip install Jinja2`." ) try: # Check if the model card is present on the remote repo model_card = ModelCard.load(repo_id_or_path, token=token) except (EntryNotFoundError, RepositoryNotFoundError): # Otherwise create a model card from template if from_training: model_card = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block license=license, library_name="diffusers", inference=inference, base_model=base_model, instance_prompt=prompt, widget=widget, ), template_path=MODEL_CARD_TEMPLATE_PATH, model_description=model_description, ) else: card_data = ModelCardData() component = "pipeline" if is_pipeline else "model" if model_description is None: model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated." model_card = ModelCard.from_template(card_data, model_description=model_description) return model_card def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard: """Populates the `model_card` with library name and optional tags.""" if model_card.data.library_name is None: model_card.data.library_name = "diffusers" if tags is not None: if isinstance(tags, str): tags = [tags] if model_card.data.tags is None: model_card.data.tags = [] for tag in tags: model_card.data.tags.append(tag) return model_card def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): """ Extracts the commit hash from a resolved filename toward a cache file. 
""" if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: if variant is not None: splits = weights_name.split(".") splits = splits[:-1] + [variant] + splits[-1:] weights_name = ".".join(splits) return weights_name @validate_hf_hub_args def _get_model_file( pretrained_model_name_or_path: Union[str, Path], *, weights_name: str, subfolder: Optional[str] = None, cache_dir: Optional[str] = None, force_download: bool = False, proxies: Optional[Dict] = None, local_files_only: bool = False, token: Optional[str] = None, user_agent: Optional[Union[Dict, str]] = None, revision: Optional[str] = None, commit_hash: Optional[str] = None, dduf_entries: Optional[Dict[str, DDUFEntry]] = None, ): pretrained_model_name_or_path = str(pretrained_model_name_or_path) if dduf_entries: if subfolder is not None: raise ValueError( "DDUF file only allow for 1 level of directory (e.g transformer/model1/model.safetentors is not allowed). " "Please check the DDUF structure" ) model_file = ( weights_name if pretrained_model_name_or_path == "" else "/".join([pretrained_model_name_or_path, weights_name]) ) if model_file in dduf_entries: return model_file else: raise EnvironmentError(f"Error no file named {weights_name} found in archive {dduf_entries.keys()}.") elif os.path.isfile(pretrained_model_name_or_path): return pretrained_model_name_or_path elif os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): # Load from a PyTorch checkpoint model_file = os.path.join(pretrained_model_name_or_path, weights_name) return model_file elif subfolder is not None and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, weights_name) ): model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) return model_file else: raise EnvironmentError( f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0") ): try: model_file = hf_hub_download( pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, ) warnings.warn( f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, ) return model_file except: # noqa: E722 warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. 
However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, ) try: # 2. Load model file as usual model_file = hf_hub_download( pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError as e: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `token` or log in with `huggingface-cli " "login`." ) from e except RevisionNotFoundError as e: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) from e except EntryNotFoundError as e: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) from e except HTTPError as e: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{e}" ) from e except ValueError as e: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {weights_name} or" " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) from e except EnvironmentError as e: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {weights_name}" ) from e # Adapted from # https://github.com/huggingface/transformers/blob/1360801a69c0b169e3efdbb0cd05d9a0e72bfb70/src/transformers/utils/hub.py#L976 # Differences are in parallelization of shard downloads and checking if shards are present. def _check_if_shards_exist_locally(local_dir, subfolder, original_shard_filenames): shards_path = os.path.join(local_dir, subfolder) shard_filenames = [os.path.join(shards_path, f) for f in original_shard_filenames] for shard_file in shard_filenames: if not os.path.exists(shard_file): raise ValueError( f"{shards_path} does not appear to have a file named {shard_file} which is " "required according to the checkpoint index." 
) def _get_checkpoint_shard_files( pretrained_model_name_or_path, index_filename, cache_dir=None, proxies=None, local_files_only=False, token=None, user_agent=None, revision=None, subfolder="", dduf_entries: Optional[Dict[str, DDUFEntry]] = None, ): """ For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). """ if dduf_entries: if index_filename not in dduf_entries: raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") else: if not os.path.isfile(index_filename): raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") if dduf_entries: index = json.loads(dduf_entries[index_filename].read_text()) else: with open(index_filename, "r") as f: index = json.loads(f.read()) original_shard_filenames = sorted(set(index["weight_map"].values())) sharded_metadata = index["metadata"] sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) sharded_metadata["weight_map"] = index["weight_map"].copy() shards_path = os.path.join(pretrained_model_name_or_path, subfolder) # First, let's deal with local folder. if os.path.isdir(pretrained_model_name_or_path): _check_if_shards_exist_locally( pretrained_model_name_or_path, subfolder=subfolder, original_shard_filenames=original_shard_filenames ) return shards_path, sharded_metadata elif dduf_entries: return shards_path, sharded_metadata # At this stage pretrained_model_name_or_path is a model identifier on the Hub allow_patterns = original_shard_filenames if subfolder is not None: allow_patterns = [os.path.join(subfolder, p) for p in allow_patterns] ignore_patterns = ["*.json", "*.md"] # `model_info` call must guarded with the above condition. model_files_info = model_info(pretrained_model_name_or_path, revision=revision, token=token) for shard_file in original_shard_filenames: shard_file_present = any(shard_file in k.rfilename for k in model_files_info.siblings) if not shard_file_present: raise EnvironmentError( f"{shards_path} does not appear to have a file named {shard_file} which is " "required according to the checkpoint index." ) try: # Load from URL cached_folder = snapshot_download( pretrained_model_name_or_path, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, user_agent=user_agent, ) if subfolder is not None: cached_folder = os.path.join(cached_folder, subfolder) # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so # we don't have to catch them here. We have also dealt with EntryNotFoundError. except HTTPError as e: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {pretrained_model_name_or_path}. You should try" " again after checking your internet connection." 
) from e return cached_folder, sharded_metadata def _check_legacy_sharding_variant_format(folder: str = None, filenames: List[str] = None, variant: str = None): if filenames and folder: raise ValueError("Both `filenames` and `folder` cannot be provided.") if not filenames: filenames = [] for _, _, files in os.walk(folder): for file in files: filenames.append(os.path.basename(file)) transformers_index_format = r"\d{5}-of-\d{5}" variant_file_re = re.compile(rf".*-{transformers_index_format}\.{variant}\.[a-z]+$") return any(variant_file_re.match(f) is not None for f in filenames) class PushToHubMixin: """ A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub. """ def _upload_folder( self, working_dir: Union[str, os.PathLike], repo_id: str, token: Optional[str] = None, commit_message: Optional[str] = None, create_pr: bool = False, ): """ Uploads all files in `working_dir` to `repo_id`. """ if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Scheduler" in self.__class__.__name__: commit_message = "Upload scheduler" else: commit_message = f"Upload {self.__class__.__name__}" logger.info(f"Uploading the files of {working_dir} to {repo_id}.") return upload_folder( repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr ) def push_to_hub( self, repo_id: str, commit_message: Optional[str] = None, private: Optional[bool] = None, token: Optional[str] = None, create_pr: bool = False, safe_serialization: bool = True, variant: Optional[str] = None, ) -> str: """ Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub. Parameters: repo_id (`str`): The name of the repository you want to push your model, scheduler, or pipeline files to. It should contain your organization name when pushing to an organization. `repo_id` can also be a path to a local directory. commit_message (`str`, *optional*): Message to commit while pushing. Default to `"Upload {object}"`. private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. The token generated when running `huggingface-cli login` (stored in `~/.huggingface`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights to the `safetensors` format. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. Examples: ```python from diffusers import UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet") # Push the `unet` to your namespace with the name "my-finetuned-unet". unet.push_to_hub("my-finetuned-unet") # Push the `unet` to an organization with the name "my-finetuned-unet". unet.push_to_hub("your-org/my-finetuned-unet") ``` """ repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id # Create a new empty model card and eventually tag it model_card = load_or_create_model_card(repo_id, token=token) model_card = populate_model_card(model_card) # Save all files. 
save_kwargs = {"safe_serialization": safe_serialization} if "Scheduler" not in self.__class__.__name__: save_kwargs.update({"variant": variant}) with tempfile.TemporaryDirectory() as tmpdir: self.save_pretrained(tmpdir, **save_kwargs) # Update model card if needed: model_card.save(os.path.join(tmpdir, "README.md")) return self._upload_folder( tmpdir, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, )
diffusers/src/diffusers/utils/hub_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/hub_utils.py", "repo_id": "diffusers", "token_count": 10219 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from parameterized import parameterized from diffusers import AsymmetricAutoencoderKL from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_hf_numpy, require_torch_accelerator, require_torch_gpu, skip_mps, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AsymmetricAutoencoderKL main_input_name = "sample" base_precision = 1e-2 def get_asym_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [2, 4] norm_num_groups = norm_num_groups or 2 init_dict = { "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "down_block_out_channels": block_out_channels, "layers_per_down_block": 1, "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), "up_block_out_channels": block_out_channels, "layers_per_up_block": 1, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": norm_num_groups, "sample_size": 32, "scaling_factor": 0.18215, } return init_dict @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) mask = torch.ones((batch_size, 1) + sizes).to(torch_device) return {"sample": image, "mask": mask} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_asym_autoencoder_kl_config() inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skip("Unsupported test.") def test_forward_with_norm_groups(self): pass @slow class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="cross-attention/asymmetric-autoencoder-kl-x-1-5", fp16=False): revision = "main" torch_dtype = torch.float32 model = AsymmetricAutoencoderKL.from_pretrained( model_id, torch_dtype=torch_dtype, revision=revision, ) model.to(torch_device).eval() return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) 
return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [ 33, [-0.0336, 0.3011, 0.1764, 0.0087, -0.3401, 0.3645, -0.1247, 0.1205], [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], ], [ 47, [0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529], [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], ], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [ 33, [-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097], [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], ], [ 47, [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], ], # fmt: on ] ) def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]], [37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_stable_diffusion_decode(self, seed, expected_slice): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=2e-3) @parameterized.expand([(13,), (16,), (37,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=5e-2) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def test_stable_diffusion_encode_sample(self, seed, expected_slice): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): dist = model.encode(image).latent_dist sample = dist.sample(generator=generator) assert list(sample.shape) == [image.shape[0], 
4] + [i // 8 for i in image.shape[2:]] output_slice = sample[0, -1, -3:, -3:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) tolerance = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
diffusers/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py", "repo_id": "diffusers", "token_count": 4339 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import inspect import json import os import re import tempfile import traceback import unittest import unittest.mock as mock import uuid from collections import defaultdict from typing import Dict, List, Optional, Tuple, Union import numpy as np import requests_mock import torch import torch.nn as nn from accelerate.utils.modeling import _get_proper_dtype, compute_module_sizes, dtype_byte_size from huggingface_hub import ModelCard, delete_repo, snapshot_download from huggingface_hub.utils import is_jinja_available from parameterized import parameterized from requests.exceptions import HTTPError from diffusers.models import UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor, AttnProcessor2_0, AttnProcessorNPU, XFormersAttnProcessor, ) from diffusers.training_utils import EMAModel from diffusers.utils import ( SAFE_WEIGHTS_INDEX_NAME, WEIGHTS_INDEX_NAME, is_peft_available, is_torch_npu_available, is_xformers_available, logging, ) from diffusers.utils.hub_utils import _add_variant from diffusers.utils.testing_utils import ( CaptureLogger, get_python_version, is_torch_compile, numpy_cosine_similarity_distance, require_torch_2, require_torch_accelerator, require_torch_accelerator_with_training, require_torch_gpu, require_torch_multi_gpu, run_test_in_subprocess, torch_all_close, torch_device, ) from diffusers.utils.torch_utils import get_torch_cuda_device_capability from ..others.test_utils import TOKEN, USER, is_staging_test if is_peft_available(): from peft.tuners.tuners_utils import BaseTunerLayer def caculate_expected_num_shards(index_map_path): with open(index_map_path) as f: weight_map_dict = json.load(f)["weight_map"] first_key = list(weight_map_dict.keys())[0] weight_loc = weight_map_dict[first_key] # e.g., diffusion_pytorch_model-00001-of-00002.safetensors expected_num_shards = int(weight_loc.split("-")[-1].split(".")[0]) return expected_num_shards def check_if_lora_correctly_set(model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False # Will be run via run_test_in_subprocess def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): error = None try: init_dict, model_class = in_queue.get(timeout=timeout) model = model_class(**init_dict) model.to(torch_device) model = torch.compile(model) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = model_class.from_pretrained(tmpdirname) new_model.to(torch_device) assert new_model.__class__ == model_class except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def named_persistent_module_tensors( module: nn.Module, recurse: bool = False, ): """ A helper function that gathers all the tensors (parameters + persistent buffers) of a given 
module. Args: module (`torch.nn.Module`): The module we want the tensors on. recurse (`bool`, *optional`, defaults to `False`): Whether or not to go look in every submodule or just return the direct parameters and buffers. """ yield from module.named_parameters(recurse=recurse) for named_buffer in module.named_buffers(recurse=recurse): name, _ = named_buffer # Get parent by splitting on dots and traversing the model parent = module if "." in name: parent_name = name.rsplit(".", 1)[0] for part in parent_name.split("."): parent = getattr(parent, part) name = name.split(".")[-1] if name not in parent._non_persistent_buffers_set: yield named_buffer def compute_module_persistent_sizes( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, ): """ Compute the size of each submodule of a given model (parameters + persistent buffers). """ if dtype is not None: dtype = _get_proper_dtype(dtype) dtype_size = dtype_byte_size(dtype) if special_dtypes is not None: special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} module_sizes = defaultdict(int) module_list = [] module_list = named_persistent_module_tensors(model, recurse=True) for name, tensor in module_list: if special_dtypes is not None and name in special_dtypes: size = tensor.numel() * special_dtypes_size[name] elif dtype is None: size = tensor.numel() * dtype_byte_size(tensor.dtype) elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): # According to the code in set_module_tensor_to_device, these types won't be converted # so use their original size here size = tensor.numel() * dtype_byte_size(tensor.dtype) else: size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) name_parts = name.split(".") for idx in range(len(name_parts) + 1): module_sizes[".".join(name_parts[:idx])] += size return module_sizes def cast_maybe_tensor_dtype(maybe_tensor, current_dtype, target_dtype): if torch.is_tensor(maybe_tensor): return maybe_tensor.to(target_dtype) if maybe_tensor.dtype == current_dtype else maybe_tensor if isinstance(maybe_tensor, dict): return {k: cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for k, v in maybe_tensor.items()} if isinstance(maybe_tensor, list): return [cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for v in maybe_tensor] return maybe_tensor class ModelUtilsTest(unittest.TestCase): def tearDown(self): super().tearDown() def test_accelerate_loading_error_message(self): with self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet") # make sure that error message states what keys are missing assert "conv_out.bias" in str(error_context.exception) @parameterized.expand( [ ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", False), ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", True), ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, False), ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, True), ] ) def test_variant_sharded_ckpt_legacy_format_raises_warning(self, repo_id, subfolder, use_local): def load_model(path): kwargs = {"variant": "fp16"} if subfolder: kwargs["subfolder"] = subfolder return UNet2DConditionModel.from_pretrained(path, **kwargs) with self.assertWarns(FutureWarning) as warning: if 
use_local: with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = snapshot_download(repo_id=repo_id) _ = load_model(tmpdirname) else: _ = load_model(repo_id) warning_message = str(warning.warnings[0].message) self.assertIn("This serialization format is now deprecated to standardize the serialization", warning_message) # Local tests are already covered down below. @parameterized.expand( [ ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", None, "fp16"), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "unet", "fp16"), ("hf-internal-testing/tiny-sd-unet-sharded-no-variants", None, None), ("hf-internal-testing/tiny-sd-unet-sharded-no-variants-subfolder", "unet", None), ] ) def test_variant_sharded_ckpt_loads_from_hub(self, repo_id, subfolder, variant=None): def load_model(): kwargs = {} if variant: kwargs["variant"] = variant if subfolder: kwargs["subfolder"] = subfolder return UNet2DConditionModel.from_pretrained(repo_id, **kwargs) assert load_model() def test_cached_files_are_used_when_no_internet(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. orig_model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.request", return_value=response_mock): # Download this model to make sure it's in the cache. model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True ) for p1, p2 in zip(orig_model.parameters(), model.parameters()): if p1.data.ne(p2.data).sum() > 0: assert False, "Parameters not the same!" @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners") @unittest.skipIf(torch_device == "mps", reason="Test not supported for MPS.") def test_one_request_upon_cached(self): use_safetensors = False with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) download_requests = [r.method for r in m.request_history] assert ( download_requests.count("HEAD") == 3 ), "3 HEAD requests one for config, one for model, and one for shard index file." assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) cache_requests = [r.method for r in m.request_history] assert ( "HEAD" == cache_requests[0] and len(cache_requests) == 2 ), "We should call only `model_info` to check for commit hash and knowing if shard index is present." 
def test_weight_overwrite(self): with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, ) # make sure that error message states what keys are missing assert "Cannot load" in str(error_context.exception) with tempfile.TemporaryDirectory() as tmpdirname: model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True, ) assert model.config.in_channels == 9 class UNetTesterMixin: def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") class ModelTesterMixin: main_input_name = None # overwrite in model specific tester class base_precision = 1e-3 forward_requires_fresh_args = False model_split_percents = [0.5, 0.7, 0.9] uses_custom_attn_processor = False def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): # Find device in device_map while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) else: self.assertEqual(param.device, torch.device(param_device)) def test_from_save_pretrained(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname) if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() new_model.to(torch_device) with torch.no_grad(): if self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def test_getattr_is_correct(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) # save some things to test model.dummy_attribute = 5 model.register_to_config(test_attribute=5) logger = logging.get_logger("diffusers.models.modeling_utils") # 30 for warning 
logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(model, "dummy_attribute") assert getattr(model, "dummy_attribute") == 5 assert model.dummy_attribute == 5 # no warning should be thrown assert cap_logger.out == "" logger = logging.get_logger("diffusers.models.modeling_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(model, "save_pretrained") fn = model.save_pretrained fn_1 = getattr(model, "save_pretrained") assert fn == fn_1 # no warning should be thrown assert cap_logger.out == "" # warning should be thrown with self.assertWarns(FutureWarning): assert model.test_attribute == 5 with self.assertWarns(FutureWarning): assert getattr(model, "test_attribute") == 5 with self.assertRaises(AttributeError) as error: model.does_not_exist assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'" @unittest.skipIf( torch_device != "npu" or not is_torch_npu_available(), reason="torch npu flash attention is only available with NPU and `torch_npu` installed", ) def test_set_torch_npu_flash_attn_processor_determinism(self): torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return model.set_default_attn_processor() assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output = model(**self.inputs_dict(0))[0] else: output = model(**inputs_dict)[0] model.enable_npu_flash_attention() assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessorNPU()) assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_3 = model(**self.inputs_dict(0))[0] else: output_3 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) assert torch.allclose(output, output_2, atol=self.base_precision) assert torch.allclose(output, output_3, atol=self.base_precision) assert torch.allclose(output_2, output_3, atol=self.base_precision) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_set_xformers_attn_processor_for_determinism(self): torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return if not hasattr(model, "set_default_attn_processor"): # If not has `set_attn_processor`, skip test return model.set_default_attn_processor() assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output = model(**self.inputs_dict(0))[0] else: output = model(**inputs_dict)[0] model.enable_xformers_memory_efficient_attention() assert all(type(proc) 
== XFormersAttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(XFormersAttnProcessor()) assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_3 = model(**self.inputs_dict(0))[0] else: output_3 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) assert torch.allclose(output, output_2, atol=self.base_precision) assert torch.allclose(output, output_3, atol=self.base_precision) assert torch.allclose(output_2, output_3, atol=self.base_precision) @require_torch_accelerator def test_set_attn_processor_for_determinism(self): if self.uses_custom_attn_processor: return torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_1 = model(**self.inputs_dict(0))[0] else: output_1 = model(**inputs_dict)[0] model.set_default_attn_processor() assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessor2_0()) assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_4 = model(**self.inputs_dict(0))[0] else: output_4 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessor()) assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_5 = model(**self.inputs_dict(0))[0] else: output_5 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) # make sure that outputs match assert torch.allclose(output_2, output_1, atol=self.base_precision) assert torch.allclose(output_2, output_4, atol=self.base_precision) assert torch.allclose(output_2, output_5, atol=self.base_precision) def test_from_save_pretrained_variant(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() # non-variant cannot be loaded with self.assertRaises(OSError) as error_context: self.model_class.from_pretrained(tmpdirname) # make sure that error message states what keys are missing assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) new_model.to(torch_device) with torch.no_grad(): if 
self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") @is_torch_compile @require_torch_2 @unittest.skipIf( get_python_version == (3, 12), reason="Torch Dynamo isn't yet supported for Python 3.12.", ) def test_from_save_pretrained_dynamo(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() inputs = [init_dict, self.model_class] run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs) def test_from_save_pretrained_dtype(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() for dtype in [torch.float32, torch.float16, torch.bfloat16]: if torch_device == "mps" and dtype == torch.bfloat16: continue with tempfile.TemporaryDirectory() as tmpdirname: model.to(dtype) model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=True, torch_dtype=dtype) assert new_model.dtype == dtype new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype) assert new_model.dtype == dtype def test_determinism(self, expected_max_diff=1e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: first = model(**self.inputs_dict(0)) else: first = model(**inputs_dict) if isinstance(first, dict): first = first.to_tuple()[0] if self.forward_requires_fresh_args: second = model(**self.inputs_dict(0)) else: second = model(**inputs_dict) if isinstance(second, dict): second = second.to_tuple()[0] out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, expected_max_diff) def test_output(self, expected_output_shape=None): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) # input & output have to have the same shape input_tensor = inputs_dict[self.main_input_name] if expected_output_shape is None: expected_shape = input_tensor.shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") else: self.assertEqual(output.shape, expected_output_shape, "Input and output shapes do not match") def test_model_from_pretrained(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() # test if the model can be loaded from the config # and has all the expected shape with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = 
self.model_class.from_pretrained(tmpdirname) new_model.to(torch_device) new_model.eval() # check if all parameters shape are the same for param_name in model.state_dict().keys(): param_1 = model.state_dict()[param_name] param_2 = new_model.state_dict()[param_name] self.assertEqual(param_1.shape, param_2.shape) with torch.no_grad(): output_1 = model(**inputs_dict) if isinstance(output_1, dict): output_1 = output_1.to_tuple()[0] output_2 = new_model(**inputs_dict) if isinstance(output_2, dict): output_2 = output_2.to_tuple()[0] self.assertEqual(output_1.shape, output_2.shape) @require_torch_accelerator_with_training def test_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.train() output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] input_tensor = inputs_dict[self.main_input_name] noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward() @require_torch_accelerator_with_training def test_ema_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.train() ema_model = EMAModel(model.parameters()) output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] input_tensor = inputs_dict[self.main_input_name] noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward() ema_model.step(model.parameters()) def test_outputs_equivalence(self): def set_nan_tensor_to_zero(t): # Temporary fallback until `aten::_index_put_impl_` is implemented in mps # Track progress in https://github.com/pytorch/pytorch/issues/77764 device = t.device if device.type == "mps": t = t.to("cpu") t[t != t] = 0 return t.to(device) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: outputs_dict = model(**self.inputs_dict(0)) outputs_tuple = model(**self.inputs_dict(0), return_dict=False) else: outputs_dict = model(**inputs_dict) outputs_tuple = model(**inputs_dict, return_dict=False) recursive_check(outputs_tuple, outputs_dict) @require_torch_accelerator_with_training def test_enable_disable_gradient_checkpointing(self): if not self.model_class._supports_gradient_checkpointing: return # Skip test if model does not support gradient checkpointing init_dict, _ = self.prepare_init_args_and_inputs_for_common() # at init model should have gradient checkpointing disabled model = self.model_class(**init_dict) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.enable_gradient_checkpointing() self.assertTrue(model.is_gradient_checkpointing) # check disable works model.disable_gradient_checkpointing() self.assertFalse(model.is_gradient_checkpointing) @require_torch_accelerator_with_training def test_effective_gradient_checkpointing(self, loss_tolerance=1e-5, param_grad_tol=5e-5, skip: set[str] = {}): if not self.model_class._supports_gradient_checkpointing: return # Skip test if model does not support gradient checkpointing # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict_copy = copy.deepcopy(inputs_dict) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing torch.manual_seed(0) model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict_copy).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < loss_tolerance) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): if "post_quant_conv" in name: continue if name in skip: continue self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=param_grad_tol)) @unittest.skipIf(torch_device == "mps", "This test is not supported for MPS devices.") def test_gradient_checkpointing_is_applied( self, expected_set=None, attention_head_dim=None, num_attention_heads=None, block_out_channels=None ): if not self.model_class._supports_gradient_checkpointing: return # Skip test if model does not support gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() if attention_head_dim is not None: init_dict["attention_head_dim"] = attention_head_dim if num_attention_heads is not None: init_dict["num_attention_heads"] = num_attention_heads if block_out_channels is not None: init_dict["block_out_channels"] = block_out_channels model_class_copy = copy.copy(self.model_class) model = model_class_copy(**init_dict) model.enable_gradient_checkpointing() modules_with_gc_enabled = {} for submodule in model.modules(): if hasattr(submodule, "gradient_checkpointing"): self.assertTrue(submodule.gradient_checkpointing) modules_with_gc_enabled[submodule.__class__.__name__] = True assert set(modules_with_gc_enabled.keys()) == expected_set assert all(modules_with_gc_enabled.values()), "All modules should be enabled" def test_deprecated_kwargs(self): has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are" " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" " under the `_deprecated_kwargs` class attribute. 
Make sure to either add the `**kwargs` argument to" f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" " from `_deprecated_kwargs = [<deprecated_argument>]`" ) @parameterized.expand([True, False]) @torch.no_grad() @unittest.skipIf(not is_peft_available(), "Only with PEFT") def test_save_load_lora_adapter(self, use_dora=False): import safetensors from peft import LoraConfig from peft.utils import get_peft_model_state_dict from diffusers.loaders.peft import PeftAdapterMixin init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) if not issubclass(model.__class__, PeftAdapterMixin): return torch.manual_seed(0) output_no_lora = model(**inputs_dict, return_dict=False)[0] denoiser_lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False, use_dora=use_dora, ) model.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") torch.manual_seed(0) outputs_with_lora = model(**inputs_dict, return_dict=False)[0] self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora, atol=1e-4, rtol=1e-4)) with tempfile.TemporaryDirectory() as tmpdir: model.save_lora_adapter(tmpdir) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) state_dict_loaded = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) model.unload_lora() self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly") model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True) state_dict_retrieved = get_peft_model_state_dict(model, adapter_name="default_0") for k in state_dict_loaded: loaded_v = state_dict_loaded[k] retrieved_v = state_dict_retrieved[k].to(loaded_v.device) self.assertTrue(torch.allclose(loaded_v, retrieved_v)) self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") torch.manual_seed(0) outputs_with_lora_2 = model(**inputs_dict, return_dict=False)[0] self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4)) self.assertTrue(torch.allclose(outputs_with_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4)) @unittest.skipIf(not is_peft_available(), "Only with PEFT") def test_wrong_adapter_name_raises_error(self): from peft import LoraConfig from diffusers.loaders.peft import PeftAdapterMixin init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) if not issubclass(model.__class__, PeftAdapterMixin): return denoiser_lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False, use_dora=False, ) model.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") with tempfile.TemporaryDirectory() as tmpdir: wrong_name = "foo" with self.assertRaises(ValueError) as err_context: model.save_lora_adapter(tmpdir, adapter_name=wrong_name) self.assertTrue(f"Adapter name {wrong_name} not found in the model." 
in str(err_context.exception)) @require_torch_accelerator def test_cpu_offload(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() if model._no_split_modules is None: return model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, "cpu": model_size * 2} new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_torch_accelerator def test_disk_offload_without_safetensors(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() if model._no_split_modules is None: return model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, safe_serialization=False) with self.assertRaises(ValueError): max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: max_size, "cpu": max_size} # This errors out because it's missing an offload folder new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = self.model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_torch_accelerator def test_disk_offload_with_safetensors(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() if model._no_split_modules is None: return model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = self.model_class.from_pretrained( tmp_dir, device_map="auto", offload_folder=tmp_dir, max_memory=max_memory ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_torch_multi_gpu def test_model_parallelism(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() if model._no_split_modules is None: return model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make 
sure it works. max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) print(f" new_model.hf_device_map:{new_model.hf_device_map}") self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_torch_accelerator def test_sharded_checkpoints(self): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() model = model.to(torch_device) base_output = model(**inputs_dict) model_size = compute_module_persistent_sizes(model)[""] max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB") self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # Now check if the right number of shards exists. First, let's get the number of shards. # Since this number can be dependent on the model being tested, it's important that we calculate it # instead of hardcoding it. expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)) actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) self.assertTrue(actual_num_shards == expected_num_shards) new_model = self.model_class.from_pretrained(tmp_dir).eval() new_model = new_model.to(torch_device) torch.manual_seed(0) if "generator" in inputs_dict: _, inputs_dict = self.prepare_init_args_and_inputs_for_common() new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_torch_accelerator def test_sharded_checkpoints_with_variant(self): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() model = model.to(torch_device) base_output = model(**inputs_dict) model_size = compute_module_persistent_sizes(model)[""] max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. variant = "fp16" with tempfile.TemporaryDirectory() as tmp_dir: # It doesn't matter if the actual model is in fp16 or not. Just adding the variant and # testing if loading works with the variant when the checkpoint is sharded should be # enough. model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB", variant=variant) index_filename = _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_filename))) # Now check if the right number of shards exists. First, let's get the number of shards. # Since this number can be dependent on the model being tested, it's important that we calculate it # instead of hardcoding it. 
expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_filename)) actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) self.assertTrue(actual_num_shards == expected_num_shards) new_model = self.model_class.from_pretrained(tmp_dir, variant=variant).eval() new_model = new_model.to(torch_device) torch.manual_seed(0) if "generator" in inputs_dict: _, inputs_dict = self.prepare_init_args_and_inputs_for_common() new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_torch_accelerator def test_sharded_checkpoints_device_map(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() if model._no_split_modules is None: return model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_persistent_sizes(model)[""] max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB") self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # Now check if the right number of shards exists. First, let's get the number of shards. # Since this number can be dependent on the model being tested, it's important that we calculate it # instead of hardcoding it. expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)) actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) self.assertTrue(actual_num_shards == expected_num_shards) new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto") torch.manual_seed(0) if "generator" in inputs_dict: _, inputs_dict = self.prepare_init_args_and_inputs_for_common() new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) # This test is okay without a GPU because we're not running any execution. We're just serializing # and check if the resultant files are following an expected format. def test_variant_sharded_ckpt_right_format(self): for use_safe in [True, False]: extension = ".safetensors" if use_safe else ".bin" config, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() model_size = compute_module_persistent_sizes(model)[""] max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. variant = "fp16" with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained( tmp_dir, variant=variant, max_shard_size=f"{max_shard_size}KB", safe_serialization=use_safe ) index_variant = _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safe else WEIGHTS_INDEX_NAME, variant) self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_variant))) # Now check if the right number of shards exists. First, let's get the number of shards. # Since this number can be dependent on the model being tested, it's important that we calculate it # instead of hardcoding it. expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_variant)) actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(extension)]) self.assertTrue(actual_num_shards == expected_num_shards) # Check if the variant is present as a substring in the checkpoints. 
shard_files = [ file for file in os.listdir(tmp_dir) if file.endswith(extension) or ("index" in file and "json" in file) ] assert all(variant in f for f in shard_files) # Check if the sharded checkpoints were serialized in the right format. shard_files = [file for file in os.listdir(tmp_dir) if file.endswith(extension)] # Example: diffusion_pytorch_model.fp16-00001-of-00002.safetensors assert all(f.split(".")[1].split("-")[0] == variant for f in shard_files) def test_layerwise_casting_inference(self): from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN, SUPPORTED_PYTORCH_LAYERS torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() model = model.to(torch_device) base_slice = model(**inputs_dict)[0].flatten().detach().cpu().numpy() def check_linear_dtype(module, storage_dtype, compute_dtype): patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN if getattr(module, "_skip_layerwise_casting_patterns", None) is not None: patterns_to_check += tuple(module._skip_layerwise_casting_patterns) for name, submodule in module.named_modules(): if not isinstance(submodule, SUPPORTED_PYTORCH_LAYERS): continue dtype_to_check = storage_dtype if any(re.search(pattern, name) for pattern in patterns_to_check): dtype_to_check = compute_dtype if getattr(submodule, "weight", None) is not None: self.assertEqual(submodule.weight.dtype, dtype_to_check) if getattr(submodule, "bias", None) is not None: self.assertEqual(submodule.bias.dtype, dtype_to_check) def test_layerwise_casting(storage_dtype, compute_dtype): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) model = self.model_class(**config).eval() model = model.to(torch_device, dtype=compute_dtype) model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) check_linear_dtype(model, storage_dtype, compute_dtype) output = model(**inputs_dict)[0].float().flatten().detach().cpu().numpy() # The precision test is not very important for fast tests. In most cases, the outputs will not be the same. # We just want to make sure that the layerwise casting is working as expected. 
self.assertTrue(numpy_cosine_similarity_distance(base_slice, output) < 1.0) test_layerwise_casting(torch.float16, torch.float32) test_layerwise_casting(torch.float8_e4m3fn, torch.float32) test_layerwise_casting(torch.float8_e5m2, torch.float32) test_layerwise_casting(torch.float8_e4m3fn, torch.bfloat16) @require_torch_gpu def test_layerwise_casting_memory(self): MB_TOLERANCE = 0.2 LEAST_COMPUTE_CAPABILITY = 8.0 def reset_memory_stats(): gc.collect() torch.cuda.synchronize() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() def get_memory_usage(storage_dtype, compute_dtype): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) model = self.model_class(**config).eval() model = model.to(torch_device, dtype=compute_dtype) model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) reset_memory_stats() model(**inputs_dict) model_memory_footprint = model.get_memory_footprint() peak_inference_memory_allocated_mb = torch.cuda.max_memory_allocated() / 1024**2 return model_memory_footprint, peak_inference_memory_allocated_mb fp32_memory_footprint, fp32_max_memory = get_memory_usage(torch.float32, torch.float32) fp8_e4m3_fp32_memory_footprint, fp8_e4m3_fp32_max_memory = get_memory_usage(torch.float8_e4m3fn, torch.float32) fp8_e4m3_bf16_memory_footprint, fp8_e4m3_bf16_max_memory = get_memory_usage( torch.float8_e4m3fn, torch.bfloat16 ) compute_capability = get_torch_cuda_device_capability() self.assertTrue(fp8_e4m3_bf16_memory_footprint < fp8_e4m3_fp32_memory_footprint < fp32_memory_footprint) # NOTE: the following assertion would fail on our CI (running Tesla T4) due to bf16 using more memory than fp32. # On other devices, such as DGX (Ampere) and Audace (Ada), the test passes. So, we conditionally check it. if compute_capability and compute_capability >= LEAST_COMPUTE_CAPABILITY: self.assertTrue(fp8_e4m3_bf16_max_memory < fp8_e4m3_fp32_max_memory) # On this dummy test case with a small model, sometimes fp8_e4m3_fp32 max memory usage is higher than fp32 by a few # bytes. This only happens for some models, so we allow a small tolerance. # For any real model being tested, the order would be fp8_e4m3_bf16 < fp8_e4m3_fp32 < fp32. 
self.assertTrue( fp8_e4m3_fp32_max_memory < fp32_max_memory or abs(fp8_e4m3_fp32_max_memory - fp32_max_memory) < MB_TOLERANCE ) @is_staging_test class ModelPushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-model-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def test_push_to_hub(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN) @unittest.skipIf( not is_jinja_available(), reason="Model card tests cannot be performed without Jinja installed.", ) def test_push_to_hub_library_name(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data assert model_card.library_name == "diffusers" # Reset repo delete_repo(self.repo_id, token=TOKEN)
diffusers/tests/models/test_modeling_common.py/0
{ "file_path": "diffusers/tests/models/test_modeling_common.py", "repo_id": "diffusers", "token_count": 29729 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import LuminaNextDiT2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = LuminaNextDiT2DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): """ Args: None Returns: Dict: Dictionary of dummy input tensors """ batch_size = 2 # N num_channels = 4 # C height = width = 16 # H, W embedding_dim = 32 # D sequence_length = 16 # L hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.rand(size=(batch_size,)).to(torch_device) encoder_mask = torch.randn(size=(batch_size, sequence_length)).to(torch_device) image_rotary_emb = torch.randn((384, 384, 4)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, "encoder_mask": encoder_mask, "image_rotary_emb": image_rotary_emb, "cross_attention_kwargs": {}, } @property def input_shape(self): """ Args: None Returns: Tuple: (int, int, int) """ return (4, 16, 16) @property def output_shape(self): """ Args: None Returns: Tuple: (int, int, int) """ return (4, 16, 16) def prepare_init_args_and_inputs_for_common(self): """ Args: None Returns: Tuple: (Dict, Dict) """ init_dict = { "sample_size": 16, "patch_size": 2, "in_channels": 4, "hidden_size": 24, "num_layers": 2, "num_attention_heads": 3, "num_kv_heads": 1, "multiple_of": 16, "ffn_dim_multiplier": None, "norm_eps": 1e-5, "learn_sigma": False, "qk_norm": True, "cross_attention_dim": 32, "scaling_factor": 1.0, } inputs_dict = self.dummy_input return init_dict, inputs_dict
diffusers/tests/models/transformers/test_models_transformer_lumina.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_lumina.py", "repo_id": "diffusers", "token_count": 1481 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import shutil import sys import tempfile import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. REFERENCE_CODE = """ \""" Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None """ class CopyCheckTester(unittest.TestCase): def setUp(self): self.diffusers_dir = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir, "schedulers/")) check_copies.DIFFUSERS_PATH = self.diffusers_dir shutil.copy( os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), ) def tearDown(self): check_copies.DIFFUSERS_PATH = "src/diffusers" shutil.rmtree(self.diffusers_dir) def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None): code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result code = check_copies.run_ruff(code) fname = os.path.join(self.diffusers_dir, "new_code.py") with open(fname, "w", newline="\n") as f: f.write(code) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0) else: check_copies.is_copy_consistent(f.name, overwrite=True) with open(fname, "r") as f: self.assertTrue(f.read(), expected) def test_find_code_in_diffusers(self): code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput") self.assertEqual(code, REFERENCE_CODE) def test_is_copy_consistent(self): # Base copy consistency self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n", ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE, ) # Copy consistency with rename self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE), ) 
# Copy consistency with a really long name long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE), ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), )
diffusers/tests/others/test_check_copies.py/0
{ "file_path": "diffusers/tests/others/test_check_copies.py", "repo_id": "diffusers", "token_count": 2028 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AmusedImg2ImgPipeline, AmusedScheduler, UVit2DModel, VQModel from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AmusedImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): torch.manual_seed(0) transformer = UVit2DModel( hidden_size=8, use_bias=False, hidden_dropout=0.0, cond_embed_dim=8, micro_cond_encode_dim=2, micro_cond_embed_dim=10, encoder_hidden_size=8, vocab_size=32, codebook_size=8, in_channels=8, block_out_channels=8, num_res_blocks=1, downsample=True, upsample=True, block_num_heads=1, num_hidden_layers=1, num_attention_heads=1, attention_dropout=0.0, intermediate_size=8, layer_norm_eps=1e-06, ln_elementwise_affine=True, ) scheduler = AmusedScheduler(mask_token_id=31) torch.manual_seed(0) vqvae = VQModel( act_fn="silu", block_out_channels=[8], down_block_types=["DownEncoderBlock2D"], in_channels=3, latent_channels=8, layers_per_block=1, norm_num_groups=8, num_vq_embeddings=32, out_channels=3, sample_size=8, up_block_types=["UpDecoderBlock2D"], mid_block_add_attention=False, lookup_from_codebook=True, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=8, intermediate_size=8, layer_norm_eps=1e-05, num_attention_heads=1, num_hidden_layers=1, pad_token_id=1, vocab_size=1000, projection_dim=8, ) text_encoder = CLIPTextModelWithProjection(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "scheduler": scheduler, "vqvae": vqvae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = torch.full((1, 3, 4, 4), 1.0, dtype=torch.float32, device=device) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", "image": image, } return inputs def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") 
def test_inference_batch_single_identical(self): ... @slow @require_torch_accelerator class AmusedImg2ImgPipelineSlowTests(unittest.TestCase): def test_amused_256(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((256, 256)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.9993, 1.0, 0.9996, 1.0, 0.9995, 0.9925, 0.999, 0.9954, 1.0]) assert np.abs(image_slice - expected_slice).max() < 0.01 def test_amused_256_fp16(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256", torch_dtype=torch.float16, variant="fp16") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((256, 256)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.998, 0.998, 0.994, 0.9944, 0.996, 0.9908, 1.0, 1.0, 0.9986]) assert np.abs(image_slice - expected_slice).max() < 0.01 def test_amused_512(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((512, 512)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2809, 0.1879, 0.2027, 0.2418, 0.1852, 0.2145, 0.2484, 0.2425, 0.2317]) assert np.abs(image_slice - expected_slice).max() < 0.1 def test_amused_512_fp16(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((512, 512)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2795, 0.1867, 0.2028, 0.2450, 0.1856, 0.2140, 0.2473, 0.2406, 0.2313]) assert np.abs(image_slice - expected_slice).max() < 0.1
diffusers/tests/pipelines/amused/test_amused_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/amused/test_amused_img2img.py", "repo_id": "diffusers", "token_count": 3928 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily based on: import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class ControlNetInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, 
"text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components class MultiControlNetInpaintPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, 
unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 
inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_accelerator class ControlNetInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "botp/stable-diffusion-v1-5-inpainting", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) prompt = "pitch black hole" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe( prompt, image=image, mask_image=mask_image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=3, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_inpaint(self): controlnet = 
ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint")

        pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(33)

        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
        )
        init_image = init_image.resize((512, 512))

        mask_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
        )
        mask_image = mask_image.resize((512, 512))

        prompt = "a handsome man with ray-ban sunglasses"

        def make_inpaint_condition(image, image_mask):
            image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
            image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0

            assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
            image[image_mask > 0.5] = -1.0  # set as masked pixel
            image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
            image = torch.from_numpy(image)
            return image

        control_image = make_inpaint_condition(init_image, mask_image)

        output = pipe(
            prompt,
            image=init_image,
            mask_image=mask_image,
            control_image=control_image,
            guidance_scale=9.0,
            eta=1.0,
            generator=generator,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy"
        )

        assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2
diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py", "repo_id": "diffusers", "token_count": 9760 }
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny, ConsistencyDecoderVAE, ControlNetXSAdapter, EulerDiscreteScheduler, StableDiffusionXLControlNetXSPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, load_image, require_torch_accelerator, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ...models.autoencoders.vae import ( get_asym_autoencoder_kl_config, get_autoencoder_kl_config, get_autoencoder_tiny_config, get_consistency_vae_config, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLControlNetXSPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetXSPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS test_attention_slicing = False test_layerwise_casting = True def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), use_linear_projection=True, norm_num_groups=4, # SD2-specific config below attention_head_dim=(2, 4), addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=56, # 6 * 8 (addition_time_embed_dim) + 8 (cross_attention_dim) cross_attention_dim=8, ) torch.manual_seed(0) controlnet = ControlNetXSAdapter.from_unet( unet=unet, size_ratio=0.5, learn_time_embedding=True, conditioning_embedding_out_channels=(2, 2), ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=4, intermediate_size=37, layer_norm_eps=1e-05, 
num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=8, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, } return components # Copied from test_controlnet_sdxl.py def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 8 * controlnet_embedder_scale_factor, 8 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, } return inputs # Copied from test_controlnet_sdxl.py def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) # Copied from test_controlnet_sdxl.py def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) # Copied from test_controlnet_sdxl.py def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) @require_torch_accelerator # Copied from test_controlnet_sdxl.py def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload(device=torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload(device=torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 # Copied from test_controlnet_sdxl.py def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) 
        inputs["prompt_2"] = "different prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

        # manually set a negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same negative_prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = inputs["negative_prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = "different negative prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

    # Copied from test_stable_diffusion_xl.py
    def test_stable_diffusion_xl_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 2 * [inputs["prompt"]]
        inputs["num_images_per_prompt"] = 2

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 2 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1.1e-4

    # Copied from test_stable_diffusion_xl.py
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    # Copied from test_controlnetxs.py
    def test_to_dtype(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)

        # pipeline creates a new UNetControlNetXSModel under the hood.
        # So we need to check the dtype from pipe.components
        model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
        self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))

        pipe.to(dtype=torch.float16)
        model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
        self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))

    def test_multi_vae(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        block_out_channels = pipe.vae.config.block_out_channels
        norm_num_groups = pipe.vae.config.norm_num_groups
        vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny]
        configs = [
            get_autoencoder_kl_config(block_out_channels, norm_num_groups),
            get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups),
            get_consistency_vae_config(block_out_channels, norm_num_groups),
            get_autoencoder_tiny_config(block_out_channels),
        ]

        out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]

        for vae_cls, config in zip(vae_classes, configs):
            vae = vae_cls(**config)
            vae = vae.to(torch_device)
            components["vae"] = vae
            vae_pipe = self.pipeline_class(**components)

            # pipeline creates a new UNetControlNetXSModel under the hood, which aren't on device.
            # So we need to move the new pipe to device.
            vae_pipe.to(torch_device)
            vae_pipe.set_progress_bar_config(disable=None)

            out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]

            assert out_vae_np.shape == out_np.shape


@slow
@require_torch_accelerator
class StableDiffusionXLControlNetXSPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_canny(self):
        controlnet = ControlNetXSAdapter.from_pretrained(
            "UmerHA/Testing-ConrolNetXS-SDXL-canny", torch_dtype=torch.float16
        )
        pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
        )
        pipe.enable_sequential_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "bird"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )

        images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images

        assert images[0].shape == (768, 512, 3)

        original_image = images[0, -3:, -3:, -1].flatten()
        expected_image = np.array([0.3202, 0.3151, 0.3328, 0.3172, 0.337, 0.3381, 0.3378, 0.3389, 0.3224])
        assert np.allclose(original_image, expected_image, atol=1e-04)

    def test_depth(self):
        controlnet = ControlNetXSAdapter.from_pretrained(
            "UmerHA/Testing-ConrolNetXS-SDXL-depth", torch_dtype=torch.float16
        )
        pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
        )
        pipe.enable_sequential_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "Stormtrooper's lecture"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png"
        )

        images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images

        assert images[0].shape == (512, 512, 3)

        original_image = images[0, -3:, -3:, -1].flatten()
        expected_image = np.array([0.5448, 0.5437, 0.5426, 0.5543, 0.553, 0.5475, 0.5595, 0.5602, 0.5529])
        assert np.allclose(original_image, expected_image, atol=1e-04)
diffusers/tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py", "repo_id": "diffusers", "token_count": 7504 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForText2Image, Kandinsky3Pipeline, Kandinsky3UNet, VQModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, require_torch_gpu, slow, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky3PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky3Pipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS test_xformers_attention = False @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = Kandinsky3UNet( in_channels=4, time_embedding_dim=4, groups=2, attention_head_dim=4, layers_per_block=3, block_out_channels=(32, 64), cross_attention_dim=4, encoder_hid_dim=32, ) scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="squaredcos_cap_v2", clip_sample=True, thresholding=False, ) torch.manual_seed(0) movq = self.dummy_movq torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "unet": unet, "scheduler": scheduler, "movq": movq, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "width": 16, "height": 16, } return inputs def test_kandinsky3(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) 
pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) @slow @require_torch_gpu class Kandinsky3PipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinskyV3(self): pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image = pipe(prompt, num_inference_steps=5, generator=generator).images[0] assert image.size == (1024, 1024) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) def test_kandinskyV3_img2img(self): pipe = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) w, h = 512, 512 image = image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) prompt = "A painting of the inside of a subway train with tiny raccoons." image = pipe(prompt, image=image, strength=0.75, num_inference_steps=5, generator=generator).images[0] assert image.size == (512, 512) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/i2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2))
diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py", "repo_id": "diffusers", "token_count": 3586 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, enable_full_determinism, load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_accelerator, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, ) enable_full_determinism() class StableDiffusion2PipelineFastTests( SDFunctionTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS test_layerwise_casting = True def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): generator_device = "cpu" if not device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = 
torch.Generator(device=generator_device).manual_seed(seed) else: generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert 
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_unflawed(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = DDIMScheduler.from_config(
            components["scheduler"].config, timestep_spacing="trailing"
        )
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["guidance_rescale"] = 0.7
        inputs["num_inference_steps"] = 10
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_long_prompt(self):
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        do_classifier_free_guidance = True
        negative_prompt = None
        num_images_per_prompt = 1
        logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion")
        logger.setLevel(logging.WARNING)

        prompt = 25 * "@"
        with CaptureLogger(logger) as cap_logger_3:
            text_embeddings_3, negative_text_embeddings_3 = sd_pipe.encode_prompt(
                prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
            )
            if negative_text_embeddings_3 is not None:
                text_embeddings_3 = torch.cat([negative_text_embeddings_3, text_embeddings_3])

        prompt = 100 * "@"
        with CaptureLogger(logger) as cap_logger:
            text_embeddings, negative_embeddings = sd_pipe.encode_prompt(
                prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
            )
            if negative_embeddings is not None:
                text_embeddings = torch.cat([negative_embeddings, text_embeddings])

        negative_prompt = "Hello"
        with CaptureLogger(logger) as cap_logger_2:
            text_embeddings_2, negative_text_embeddings_2 = sd_pipe.encode_prompt(
                prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
            )
            if negative_text_embeddings_2 is not None:
                text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2])

        assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape
        assert text_embeddings.shape[1] == 77
        assert cap_logger.out == cap_logger_2.out
        # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25
        assert cap_logger.out.count("@") == 25
        assert cap_logger_3.out == ""

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_accelerator
@skip_mps
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"
        if not str(device).startswith("mps"):
            generator = torch.Generator(device=_generator_device).manual_seed(seed)
        else:
            generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
"prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_default_ddim(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 @require_torch_gpu def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe.unet.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # enable attention slicing pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 3.3 GB is allocated assert mem_bytes < 3.3 * 10**9 # disable slicing pipe.disable_attention_slicing() pipe.unet.set_default_attn_processor() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images # make sure that more than 3.3 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3.3 * 10**9 max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten()) assert max_diff < 5e-3 @nightly @require_torch_accelerator @skip_mps class StableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=_generator_device).manual_seed(seed) else: generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_2_1_default(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_0_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py", "repo_id": "diffusers", "token_count": 7595 }
import gc import inspect import json import os import tempfile import unittest import uuid from typing import Any, Callable, Dict, Union import numpy as np import PIL.Image import torch import torch.nn as nn from huggingface_hub import ModelCard, delete_repo from huggingface_hub.utils import is_jinja_available from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny, ConsistencyDecoderVAE, DDIMScheduler, DiffusionPipeline, KolorsPipeline, PyramidAttentionBroadcastConfig, StableDiffusionPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, ) from diffusers.hooks.pyramid_attention_broadcast import PyramidAttentionBroadcastHook from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FluxIPAdapterMixin, IPAdapterMixin from diffusers.models.attention_processor import AttnProcessor from diffusers.models.controlnets.controlnet_xs import UNetControlNetXSModel from diffusers.models.unets.unet_3d_condition import UNet3DConditionModel from diffusers.models.unets.unet_i2vgen_xl import I2VGenXLUNet from diffusers.models.unets.unet_motion_model import UNetMotionModel from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, require_accelerate_version_greater, require_accelerator, require_hf_hub_version_greater, require_torch, require_transformers_version_greater, skip_mps, torch_device, ) from ..models.autoencoders.vae import ( get_asym_autoencoder_kl_config, get_autoencoder_kl_config, get_autoencoder_tiny_config, get_consistency_vae_config, ) from ..models.transformers.test_models_transformer_flux import create_flux_ip_adapter_state_dict from ..models.unets.test_models_unet_2d_condition import ( create_ip_adapter_faceid_state_dict, create_ip_adapter_state_dict, ) from ..others.test_utils import TOKEN, USER, is_staging_test def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) def check_qkv_fusion_matches_attn_procs_length(model, original_attn_processors): current_attn_processors = model.attn_processors return len(current_attn_processors) == len(original_attn_processors) def check_qkv_fusion_processors_exist(model): current_attn_processors = model.attn_processors proc_names = [v.__class__.__name__ for _, v in current_attn_processors.items()] return all(p.startswith("Fused") for p in proc_names) class SDFunctionTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for PyTorch pipeline that inherit from StableDiffusionMixin, e.g. vae_slicing, vae_tiling, freeu, etc. 
""" def test_vae_slicing(self, image_count=4): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count if "image" in inputs: # fix batch size mismatch in I2V_Gen pipeline inputs["image"] = [inputs["image"]] * image_count output_1 = pipe(**inputs) # make sure sliced vae decode yields the same result pipe.enable_vae_slicing() inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count if "image" in inputs: inputs["image"] = [inputs["image"]] * image_count inputs["return_dict"] = False output_2 = pipe(**inputs) assert np.abs(output_2[0].flatten() - output_1[0].flatten()).max() < 1e-2 def test_vae_tiling(self): components = self.get_dummy_components() # make sure here that pndm scheduler skips prk if "safety_checker" in components: components["safety_checker"] = None pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False # Test that tiled decode at 512x512 yields the same result as the non-tiled decode output_1 = pipe(**inputs)[0] # make sure tiled vae decode yields the same result pipe.enable_vae_tiling() inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False output_2 = pipe(**inputs)[0] assert np.abs(to_np(output_2) - to_np(output_1)).max() < 5e-1 # test that tiled decode works with various shapes shapes = [(1, 4, 73, 97), (1, 4, 65, 49)] with torch.no_grad(): for shape in shapes: zeros = torch.zeros(shape).to(torch_device) pipe.vae.decode(zeros) # MPS currently doesn't support ComplexFloats, which are required for FreeU - see https://github.com/huggingface/diffusers/issues/7569. @skip_mps def test_freeu(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # Normal inference inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False inputs["output_type"] = "np" output = pipe(**inputs)[0] # FreeU-enabled inference pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False inputs["output_type"] = "np" output_freeu = pipe(**inputs)[0] # FreeU-disabled inference pipe.disable_freeu() freeu_keys = {"s1", "s2", "b1", "b2"} for upsample_block in pipe.unet.up_blocks: for key in freeu_keys: assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False inputs["output_type"] = "np" output_no_freeu = pipe(**inputs)[0] assert not np.allclose( output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1] ), "Enabling of FreeU should lead to different results." assert np.allclose( output, output_no_freeu, atol=1e-2 ), f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}." 
def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image = pipe(**inputs)[0] original_image_slice = image[0, -3:, -3:, -1] pipe.fuse_qkv_projections() for _, component in pipe.components.items(): if ( isinstance(component, nn.Module) and hasattr(component, "original_attn_processors") and component.original_attn_processors is not None ): assert check_qkv_fusion_processors_exist( component ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( component, component.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_fused = pipe(**inputs)[0] image_slice_fused = image_fused[0, -3:, -3:, -1] pipe.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] assert np.allclose( original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 ), "Fusion of QKV projections shouldn't affect the outputs." assert np.allclose( image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." assert np.allclose( original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Original outputs should match when fused QKV projections are disabled." class IPAdapterTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for pipelines that support IP Adapters. """ def test_pipeline_signature(self): parameters = inspect.signature(self.pipeline_class.__call__).parameters assert issubclass(self.pipeline_class, IPAdapterMixin) self.assertIn( "ip_adapter_image", parameters, "`ip_adapter_image` argument must be supported by the `__call__` method", ) self.assertIn( "ip_adapter_image_embeds", parameters, "`ip_adapter_image_embeds` argument must be supported by the `__call__` method", ) def _get_dummy_image_embeds(self, cross_attention_dim: int = 32): return torch.randn((2, 1, cross_attention_dim), device=torch_device) def _get_dummy_faceid_image_embeds(self, cross_attention_dim: int = 32): return torch.randn((2, 1, 1, cross_attention_dim), device=torch_device) def _get_dummy_masks(self, input_size: int = 64): _masks = torch.zeros((1, 1, input_size, input_size), device=torch_device) _masks[0, :, :, : int(input_size / 2)] = 1 return _masks def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): parameters = inspect.signature(self.pipeline_class.__call__).parameters if "image" in parameters.keys() and "strength" in parameters.keys(): inputs["num_inference_steps"] = 4 inputs["output_type"] = "np" inputs["return_dict"] = False return inputs def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): r"""Tests for IP-Adapter. The following scenarios are tested: - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. 
- Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. """ # Raising the tolerance for this test when it's run on a CPU because we # compare against static slices and that can be shaky (with a VVVV low probability). expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) if expected_pipe_slice is None: output_without_adapter = pipe(**inputs)[0] else: output_without_adapter = expected_pipe_slice # 1. Single IP-Adapter test cases adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-2, "Output with ip-adapter must be different from normal inference" ) # 2. 
Multi IP-Adapter test cases adapter_state_dict_1 = create_ip_adapter_state_dict(pipe.unet) adapter_state_dict_2 = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) # forward pass with multi ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 pipe.set_ip_adapter_scale([0.0, 0.0]) output_without_multi_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with multi ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 pipe.set_ip_adapter_scale([42.0, 42.0]) output_with_multi_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_multi_adapter_scale = np.abs( output_without_multi_adapter_scale - output_without_adapter ).max() max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_multi_adapter_scale, expected_max_diff, "Output without multi-ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_multi_adapter_scale, 1e-2, "Output with multi-ip-adapter scale must be different from normal inference", ) def test_ip_adapter_cfg(self, expected_max_diff: float = 1e-4): parameters = inspect.signature(self.pipeline_class.__call__).parameters if "guidance_scale" not in parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) pipe.set_ip_adapter_scale(1.0) # forward pass with CFG not applied inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)[0].unsqueeze(0)] inputs["guidance_scale"] = 1.0 out_no_cfg = pipe(**inputs)[0] # forward pass with CFG applied inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["guidance_scale"] = 7.5 out_cfg = pipe(**inputs)[0] assert out_cfg.shape == out_no_cfg.shape def test_ip_adapter_masks(self, expected_max_diff: float = 1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) sample_size = pipe.unet.config.get("sample_size", 32) block_out_channels = pipe.vae.config.get("block_out_channels", [128, 256, 512, 512]) input_size = sample_size * (2 ** (len(block_out_channels) - 1)) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) output_without_adapter = pipe(**inputs)[0] output_without_adapter = output_without_adapter[0, -3:, -3:, -1].flatten() 
adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter and masks, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["cross_attention_kwargs"] = {"ip_adapter_masks": [self._get_dummy_masks(input_size)]} pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter and masks, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["cross_attention_kwargs"] = {"ip_adapter_masks": [self._get_dummy_masks(input_size)]} pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-3, "Output with ip-adapter must be different from normal inference" ) def test_ip_adapter_faceid(self, expected_max_diff: float = 1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) output_without_adapter = pipe(**inputs)[0] output_without_adapter = output_without_adapter[0, -3:, -3:, -1].flatten() adapter_state_dict = create_ip_adapter_faceid_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_faceid_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_faceid_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-3, "Output with ip-adapter must be different from normal inference" ) class 
FluxIPAdapterTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for pipelines that support IP Adapters. """ def test_pipeline_signature(self): parameters = inspect.signature(self.pipeline_class.__call__).parameters assert issubclass(self.pipeline_class, FluxIPAdapterMixin) self.assertIn( "ip_adapter_image", parameters, "`ip_adapter_image` argument must be supported by the `__call__` method", ) self.assertIn( "ip_adapter_image_embeds", parameters, "`ip_adapter_image_embeds` argument must be supported by the `__call__` method", ) def _get_dummy_image_embeds(self, image_embed_dim: int = 768): return torch.randn((1, 1, image_embed_dim), device=torch_device) def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): inputs["negative_prompt"] = "" inputs["true_cfg_scale"] = 4.0 inputs["output_type"] = "np" inputs["return_dict"] = False return inputs def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): r"""Tests for IP-Adapter. The following scenarios are tested: - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. """ # Raising the tolerance for this test when it's run on a CPU because we # compare against static slices and that can be shaky (with a VVVV low probability). expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) image_embed_dim = pipe.transformer.config.pooled_projection_dim # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) if expected_pipe_slice is None: output_without_adapter = pipe(**inputs)[0] else: output_without_adapter = expected_pipe_slice adapter_state_dict = create_flux_ip_adapter_state_dict(pipe.transformer) pipe.transformer._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 
1e-2, "Output with ip-adapter must be different from normal inference" ) class PipelineLatentTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for PyTorch pipeline that has vae, e.g. equivalence of different input and output types, etc. """ @property def image_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_params` in the child test class. " "`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results" ) @property def image_latents_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_latents_params` in the child test class. " "`image_latents_params` are tested for if passing latents directly are producing same results" ) def get_dummy_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = self.get_dummy_inputs(device, seed) def convert_to_pt(image): if isinstance(image, torch.Tensor): input_image = image elif isinstance(image, np.ndarray): input_image = VaeImageProcessor.numpy_to_pt(image) elif isinstance(image, PIL.Image.Image): input_image = VaeImageProcessor.pil_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pt(input_image) else: raise ValueError(f"unsupported input_image_type {type(image)}") return input_image def convert_pt_to_type(image, input_image_type): if input_image_type == "pt": input_image = image elif input_image_type == "np": input_image = VaeImageProcessor.pt_to_numpy(image) elif input_image_type == "pil": input_image = VaeImageProcessor.pt_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pil(input_image) else: raise ValueError(f"unsupported input_image_type {input_image_type}.") return input_image for image_param in self.image_params: if image_param in inputs.keys(): inputs[image_param] = convert_pt_to_type( convert_to_pt(inputs[image_param]).to(device), input_image_type ) inputs["output_type"] = output_type return inputs def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4): self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff) def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type="pt"): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output_pt = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pt") )[0] output_np = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="np") )[0] output_pil = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pil") )[0] max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess( max_diff, expected_max_diff, "`output_type=='pt'` generate different results from `output_type=='np'`" ) max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_pt_np_pil_inputs_equivalent(self): if len(self.image_params) == 0: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] out_input_np = 
pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]
        out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pil"))[0]

        max_diff = np.abs(out_input_pt - out_input_np).max()
        self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
        max_diff = np.abs(out_input_pil - out_input_np).max()
        self.assertLess(max_diff, 1e-2, "`input_type=='pil'` generate different result from `input_type=='np'`")

    def test_latents_input(self):
        if len(self.image_latents_params) == 0:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        generator = inputs["generator"]
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = (
                    vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor
                )
        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")

    def test_multi_vae(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        block_out_channels = pipe.vae.config.block_out_channels
        norm_num_groups = pipe.vae.config.norm_num_groups

        vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny]
        configs = [
            get_autoencoder_kl_config(block_out_channels, norm_num_groups),
            get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups),
            get_consistency_vae_config(block_out_channels, norm_num_groups),
            get_autoencoder_tiny_config(block_out_channels),
        ]

        out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]

        for vae_cls, config in zip(vae_classes, configs):
            vae = vae_cls(**config)
            vae = vae.to(torch_device)
            components["vae"] = vae
            vae_pipe = self.pipeline_class(**components)

            out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]

            assert out_vae_np.shape == out_np.shape


@require_torch
class PipelineFromPipeTesterMixin:
    @property
    def original_pipeline_class(self):
        if "xl" in self.pipeline_class.__name__.lower():
            original_pipeline_class = StableDiffusionXLPipeline
        elif "kolors" in self.pipeline_class.__name__.lower():
            original_pipeline_class = KolorsPipeline
        else:
            original_pipeline_class = StableDiffusionPipeline
        return original_pipeline_class

    def get_dummy_inputs_pipe(self, device, seed=0):
        inputs = self.get_dummy_inputs(device, seed=seed)
        inputs["output_type"] = "np"
        inputs["return_dict"] = False
        return inputs

    def get_dummy_inputs_for_pipe_original(self, device, seed=0):
        inputs = {}
        for k, v in self.get_dummy_inputs_pipe(device, seed=seed).items():
            if k in set(inspect.signature(self.original_pipeline_class.__call__).parameters.keys()):
                inputs[k] = v
        return inputs

    def test_from_pipe_consistent_config(self):
        if self.original_pipeline_class == StableDiffusionPipeline:
            original_repo = "hf-internal-testing/tiny-stable-diffusion-pipe"
            original_kwargs = {"requires_safety_checker": False}
        elif self.original_pipeline_class == StableDiffusionXLPipeline:
            original_repo =
"hf-internal-testing/tiny-stable-diffusion-xl-pipe" original_kwargs = {"requires_aesthetics_score": True, "force_zeros_for_empty_prompt": False} elif self.original_pipeline_class == KolorsPipeline: original_repo = "hf-internal-testing/tiny-kolors-pipe" original_kwargs = {"force_zeros_for_empty_prompt": False} else: raise ValueError( "original_pipeline_class must be either StableDiffusionPipeline or StableDiffusionXLPipeline" ) # create original_pipeline_class(sd/sdxl) pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) # original_pipeline_class(sd/sdxl) -> pipeline_class pipe_components = self.get_dummy_components() pipe_additional_components = {} for name, component in pipe_components.items(): if name not in pipe_original.components: pipe_additional_components[name] = component pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) # pipeline_class -> original_pipeline_class(sd/sdxl) original_pipe_additional_components = {} for name, component in pipe_original.components.items(): if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): original_pipe_additional_components[name] = component pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) # compare the config original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} assert original_config_2 == original_config def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3): components = self.get_dummy_components() original_expected_modules, _ = self.original_pipeline_class._get_signature_keys(self.original_pipeline_class) # pipeline components that are also expected to be in the original pipeline original_pipe_components = {} # additional components that are not in the pipeline, but expected in the original pipeline original_pipe_additional_components = {} # additional components that are in the pipeline, but not expected in the original pipeline current_pipe_additional_components = {} for name, component in components.items(): if name in original_expected_modules: original_pipe_components[name] = component else: current_pipe_additional_components[name] = component for name in original_expected_modules: if name not in original_pipe_components: if name in self.original_pipeline_class._optional_components: original_pipe_additional_components[name] = None else: raise ValueError(f"missing required module for {self.original_pipeline_class.__class__}: {name}") pipe_original = self.original_pipeline_class(**original_pipe_components, **original_pipe_additional_components) for component in pipe_original.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_original.to(torch_device) pipe_original.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs_for_pipe_original(torch_device) output_original = pipe_original(**inputs)[0] pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs_pipe(torch_device) output = pipe(**inputs)[0] pipe_from_original = self.pipeline_class.from_pipe(pipe_original, **current_pipe_additional_components) pipe_from_original.to(torch_device) 
pipe_from_original.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs_pipe(torch_device) output_from_original = pipe_from_original(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_from_original)).max() self.assertLess( max_diff, expected_max_diff, "The outputs of the pipelines created with `from_pipe` and `__init__` are different.", ) inputs = self.get_dummy_inputs_for_pipe_original(torch_device) output_original_2 = pipe_original(**inputs)[0] max_diff = np.abs(to_np(output_original) - to_np(output_original_2)).max() self.assertLess(max_diff, expected_max_diff, "`from_pipe` should not change the output of original pipeline.") for component in pipe_original.components.values(): if hasattr(component, "attn_processors"): assert all( type(proc) == AttnProcessor for proc in component.attn_processors.values() ), "`from_pipe` changed the attention processor in original pipeline." @require_accelerator @require_accelerate_version_greater("0.14.0") def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs_pipe(torch_device) output = pipe(**inputs)[0] original_expected_modules, _ = self.original_pipeline_class._get_signature_keys(self.original_pipeline_class) # pipeline components that are also expected to be in the original pipeline original_pipe_components = {} # additional components that are not in the pipeline, but expected in the original pipeline original_pipe_additional_components = {} # additional components that are in the pipeline, but not expected in the original pipeline current_pipe_additional_components = {} for name, component in components.items(): if name in original_expected_modules: original_pipe_components[name] = component else: current_pipe_additional_components[name] = component for name in original_expected_modules: if name not in original_pipe_components: if name in self.original_pipeline_class._optional_components: original_pipe_additional_components[name] = None else: raise ValueError(f"missing required module for {self.original_pipeline_class.__class__}: {name}") pipe_original = self.original_pipeline_class(**original_pipe_components, **original_pipe_additional_components) for component in pipe_original.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_original.set_progress_bar_config(disable=None) pipe_from_original = self.pipeline_class.from_pipe(pipe_original, **current_pipe_additional_components) for component in pipe_from_original.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_from_original.enable_model_cpu_offload(device=torch_device) pipe_from_original.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs_pipe(torch_device) output_from_original = pipe_from_original(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_from_original)).max() self.assertLess( max_diff, expected_max_diff, "The outputs of the pipelines created with `from_pipe` and `__init__` are different.", ) @require_torch class PipelineKarrasSchedulerTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. 
It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers equivalence of dict and tuple outputs, etc. """ def test_karras_schedulers_shape( self, num_inference_steps_for_strength=4, num_inference_steps_for_strength_for_iterations=5 ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 if "strength" in inputs: inputs["num_inference_steps"] = num_inference_steps_for_strength inputs["strength"] = 0.5 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if "KDPM2" in scheduler_enum.name: inputs["num_inference_steps"] = num_inference_steps_for_strength_for_iterations scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) if "KDPM2" in scheduler_enum.name: inputs["num_inference_steps"] = 2 assert check_same_shape(outputs) @require_torch class PipelineTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline, equivalence of dict and tuple outputs, etc. """ # Canonical parameters that are passed to `__call__` regardless # of the type of pipeline. They are always optional and have common # sense default values. required_optional_params = frozenset( [ "num_inference_steps", "num_images_per_prompt", "generator", "latents", "output_type", "return_dict", ] ) # set these parameters to False in the child class if the pipeline does not support the corresponding functionality test_attention_slicing = True test_xformers_attention = True test_layerwise_casting = False supports_dduf = True def get_generator(self, seed): device = torch_device if torch_device != "mps" else "cpu" generator = torch.Generator(device).manual_seed(seed) return generator @property def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: raise NotImplementedError( "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_components(self): raise NotImplementedError( "You need to implement `get_dummy_components(self)` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_inputs(self, device, seed=0): raise NotImplementedError( "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " "See existing pipeline tests for reference." ) @property def params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `params` in the child test class. " "`params` are checked for if all values are present in `__call__`'s signature." " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " "image pipelines, including prompts and prompt embedding overrides." "If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " "do not make modifications to the existing common sets of arguments. I.e. 
a text to image pipeline " "with non-configurable height and width arguments should set the attribute as " "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " "See existing pipeline tests for reference." ) @property def batch_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `batch_params` in the child test class. " "`batch_params` are the parameters required to be batched when passed to the pipeline's " "`__call__` method. `pipeline_params.py` provides some common sets of parameters such as " "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " "set of batch arguments has minor changes from one of the common sets of batch arguments, " "do not make modifications to the existing common sets of batch arguments. I.e. a text to " "image pipeline `negative_prompt` is not batched should set the attribute as " "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " "See existing pipeline tests for reference." ) @property def callback_cfg_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. " "`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback " "function when dynamically adjusting `guidance_scale`. They are variables that require special" "treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common" " sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's " "set of cfg arguments has minor changes from one of the common sets of cfg arguments, " "do not make modifications to the existing common sets of cfg arguments. I.e. for inpaint pipeline, you " " need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as" "`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`" ) def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test in case of CUDA runtime errors super().tearDown() gc.collect() torch.cuda.empty_cache() def test_save_load_local(self, expected_max_difference=5e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: assert name in str(cap_logger) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_pipeline_call_signature(self): self.assertTrue( 
hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method" ) parameters = inspect.signature(self.pipeline_class.__call__).parameters optional_parameters = set() for k, v in parameters.items(): if v.default != inspect._empty: optional_parameters.add(k) parameters = set(parameters.keys()) parameters.remove("self") parameters.discard("kwargs") # kwargs can be added if arguments of pipeline call function are deprecated remaining_required_parameters = set() for param in self.params: if param not in parameters: remaining_required_parameters.add(param) self.assertTrue( len(remaining_required_parameters) == 0, f"Required parameters not present: {remaining_required_parameters}", ) remaining_required_optional_parameters = set() for param in self.required_optional_params: if param not in optional_parameters: remaining_required_optional_parameters.add(param) self.assertTrue( len(remaining_required_optional_parameters) == 0, f"Required optional parameters not present: {remaining_required_optional_parameters}", ) def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes) def _test_inference_batch_consistent( self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"], batch_generator=True ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # prepare batched inputs batched_inputs = [] for batch_size in batch_sizes: batched_input = {} batched_input.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_input[name][-1] = 100 * "very long" else: batched_input[name] = batch_size * [value] if batch_generator and "generator" in inputs: batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_input["batch_size"] = batch_size batched_inputs.append(batched_input) logger.setLevel(level=diffusers.logging.WARNING) for batch_size, batched_input in zip(batch_sizes, batched_inputs): output = pipe(**batched_input) assert len(output[0]) == batch_size def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4): self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff) def _test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in 
inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff def test_dict_tuple_outputs_equivalent(self, expected_slice=None, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" if expected_slice is None: output = pipe(**self.get_dummy_inputs(generator_device))[0] else: output = expected_slice output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] if expected_slice is None: max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() else: if output_tuple.ndim != 5: max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1].flatten()).max() else: max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1, -1].flatten()).max() self.assertLess(max_diff, expected_max_difference) def test_components_function(self): init_components = self.get_dummy_components() init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) for component in pipe_fp16.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(0) output = pipe(**inputs)[0] fp16_inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: fp16_inputs["generator"] = self.get_generator(0) output_fp16 = pipe_fp16(**fp16_inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def 
test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to(torch_device) model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == torch_device for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0] 
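        # the forward pass on the accelerator should likewise produce no NaNs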
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3): self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff) def _test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) if test_mean_pixel_difference: assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0])) assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): import accelerate components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload(device=torch_device) assert pipe._execution_device.type == torch_device inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. 
all offloaded modules should be saved to cpu and moved to meta device self.assertTrue( all(v.device.type == "meta" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'meta']}", ) # 2. all offloaded modules should have hook installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. all offloaded modules should have correct hooks installed, should be either one of these two # - `AlignDevicesHook` # - a SequentialHook` that contains `AlignDevicesHook` offloaded_modules_with_incorrect_hooks = {} for k, v in offloaded_modules.items(): if hasattr(v, "_hf_hook"): if isinstance(v._hf_hook, accelerate.hooks.SequentialHook): # if it is a `SequentialHook`, we loop through its `hooks` attribute to check if it only contains `AlignDevicesHook` for hook in v._hf_hook.hooks: if not isinstance(hook, accelerate.hooks.AlignDevicesHook): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook.hooks[0]) elif not isinstance(v._hf_hook, accelerate.hooks.AlignDevicesHook): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) self.assertTrue( len(offloaded_modules_with_incorrect_hooks) == 0, f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) @require_accelerator @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): import accelerate generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload(device=torch_device) assert pipe._execution_device.type == torch_device inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. check if all offloaded modules are saved to cpu self.assertTrue( all(v.device.type == "cpu" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'cpu']}", ) # 2. check if all offloaded modules have hooks installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. 
check if all offloaded modules have correct type of hooks installed, should be `CpuOffload` offloaded_modules_with_incorrect_hooks = {} for k, v in offloaded_modules.items(): if hasattr(v, "_hf_hook") and not isinstance(v._hf_hook, accelerate.hooks.CpuOffload): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) self.assertTrue( len(offloaded_modules_with_incorrect_hooks) == 0, f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) @require_accelerator @require_accelerate_version_greater("0.17.0") def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.set_progress_bar_config(disable=None) pipe.enable_model_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(generator_device) output_with_offload_twice = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_with_offload_twice)).max() self.assertLess( max_diff, expected_max_diff, "running CPU offloading 2nd time should not affect the inference results" ) # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. check if all offloaded modules are saved to cpu self.assertTrue( all(v.device.type == "cpu" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'cpu']}", ) # 2. check if all offloaded modules have hooks installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. 
        # check if all offloaded modules have correct type of hooks installed, should be `CpuOffload`
        offloaded_modules_with_incorrect_hooks = {}
        for k, v in offloaded_modules.items():
            if hasattr(v, "_hf_hook") and not isinstance(v._hf_hook, accelerate.hooks.CpuOffload):
                offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook)

        self.assertTrue(
            len(offloaded_modules_with_incorrect_hooks) == 0,
            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
        )

    @require_accelerator
    @require_accelerate_version_greater("0.14.0")
    def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
        import accelerate

        generator_device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe.set_progress_bar_config(disable=None)

        pipe.enable_sequential_cpu_offload(device=torch_device)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_offload = pipe(**inputs)[0]

        pipe.enable_sequential_cpu_offload(device=torch_device)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_offload_twice = pipe(**inputs)[0]

        max_diff = np.abs(to_np(output_with_offload) - to_np(output_with_offload_twice)).max()
        self.assertLess(
            max_diff,
            expected_max_diff,
            "running sequential offloading a second time should not affect the inference results",
        )

        # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly
        offloaded_modules = {
            k: v
            for k, v in pipe.components.items()
            if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
        }
        # 1. check if all offloaded modules are moved to meta device
        self.assertTrue(
            all(v.device.type == "meta" for v in offloaded_modules.values()),
            f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'meta']}",
        )
        # 2. check if all offloaded modules have hook installed
        self.assertTrue(
            all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()),
            f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}",
        )
        # 3. check if all offloaded modules have correct hooks installed, should be either one of these two
        # - `AlignDevicesHook`
        # - a `SequentialHook` that contains `AlignDevicesHook`
        offloaded_modules_with_incorrect_hooks = {}
        for k, v in offloaded_modules.items():
            if hasattr(v, "_hf_hook"):
                if isinstance(v._hf_hook, accelerate.hooks.SequentialHook):
                    # if it is a `SequentialHook`, we loop through its `hooks` attribute to check if it only contains `AlignDevicesHook`
                    for hook in v._hf_hook.hooks:
                        if not isinstance(hook, accelerate.hooks.AlignDevicesHook):
                            offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook.hooks[0])
                elif not isinstance(v._hf_hook, accelerate.hooks.AlignDevicesHook):
                    offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook)

        self.assertTrue(
            len(offloaded_modules_with_incorrect_hooks) == 0,
            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass()

    def _test_xformers_attention_forwardGenerator_pass(
        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4
    ):
        if not self.test_xformers_attention:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output_without_offload = pipe(**inputs)[0]
        output_without_offload = (
            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
        )

        pipe.enable_xformers_memory_efficient_attention()
        inputs = self.get_dummy_inputs(torch_device)
        output_with_offload = pipe(**inputs)[0]
        output_with_offload = (
            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload
        )

        if test_max_difference:
            max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
            self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")

        if test_mean_pixel_difference:
            assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])

    def test_num_images_per_prompt(self):
        sig = inspect.signature(self.pipeline_class.__call__)

        if "num_images_per_prompt" not in sig.parameters:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_sizes = [1, 2]
        num_images_per_prompts = [1, 2]

        for batch_size in batch_sizes:
            for num_images_per_prompt in num_images_per_prompts:
                inputs = self.get_dummy_inputs(torch_device)

                for key in inputs.keys():
                    if key in self.batch_params:
                        inputs[key] = batch_size * [inputs[key]]

                images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

                assert images.shape[0] == batch_size * num_images_per_prompt

    def test_cfg(self):
        sig = inspect.signature(self.pipeline_class.__call__)

        if "guidance_scale" not in sig.parameters:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        inputs["guidance_scale"] = 1.0
        out_no_cfg = pipe(**inputs)[0]

        inputs["guidance_scale"] =
7.5 out_cfg = pipe(**inputs)[0] assert out_cfg.shape == out_no_cfg.shape def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] inputs["output_type"] = "latent" output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 def test_callback_cfg(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return if "guidance_scale" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_increase_guidance(pipe, i, t, callback_kwargs): pipe._guidance_scale += 1.0 return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # use cfg guidance because some pipelines modify the shape of the latents # outside of the denoising loop inputs["guidance_scale"] = 2.0 inputs["callback_on_step_end"] = callback_increase_guidance inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs _ = pipe(**inputs)[0] # we increase the guidance scale by 1.0 at every step # check that the guidance scale is increased by the number of 
scheduler timesteps # accounts for models that modify the number of inference steps based on strength assert pipe.guidance_scale == (inputs["guidance_scale"] + pipe.num_timesteps) def test_serialization_with_variants(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) model_components = [ component_name for component_name, component in pipe.components.items() if isinstance(component, nn.Module) ] variant = "fp16" with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) with open(f"{tmpdir}/model_index.json", "r") as f: config = json.load(f) for subfolder in os.listdir(tmpdir): if not os.path.isfile(subfolder) and subfolder in model_components: folder_path = os.path.join(tmpdir, subfolder) is_folder = os.path.isdir(folder_path) and subfolder in config assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) def test_loading_with_variants(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) variant = "fp16" def is_nan(tensor): if tensor.ndimension() == 0: has_nan = torch.isnan(tensor).item() else: has_nan = torch.isnan(tensor).any() return has_nan with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, variant=variant) model_components_pipe = { component_name: component for component_name, component in pipe.components.items() if isinstance(component, nn.Module) } model_components_pipe_loaded = { component_name: component for component_name, component in pipe_loaded.components.items() if isinstance(component, nn.Module) } for component_name in model_components_pipe: pipe_component = model_components_pipe[component_name] pipe_loaded_component = model_components_pipe_loaded[component_name] for p1, p2 in zip(pipe_component.parameters(), pipe_loaded_component.parameters()): # nan check for luminanext (mps). if not (is_nan(p1) and is_nan(p2)): self.assertTrue(torch.equal(p1, p2)) def test_loading_with_incorrect_variants_raises_error(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) variant = "fp16" with tempfile.TemporaryDirectory() as tmpdir: # Don't save with variants. 
pipe.save_pretrained(tmpdir, safe_serialization=False) with self.assertRaises(ValueError) as error: _ = self.pipeline_class.from_pretrained(tmpdir, variant=variant) assert f"You are trying to load the model files of the `variant={variant}`" in str(error.exception) def test_StableDiffusionMixin_component(self): """Any pipeline that inherits from StableDiffusionMixin should have vae and unet components.""" if not issubclass(self.pipeline_class, StableDiffusionMixin): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) self.assertTrue(hasattr(pipe, "vae") and isinstance(pipe.vae, (AutoencoderKL, AutoencoderTiny))) self.assertTrue( hasattr(pipe, "unet") and isinstance( pipe.unet, (UNet2DConditionModel, UNet3DConditionModel, I2VGenXLUNet, UNetMotionModel, UNetControlNetXSModel), ) ) @require_hf_hub_version_greater("0.26.5") @require_transformers_version_greater("4.47.1") def test_save_load_dduf(self, atol=1e-4, rtol=1e-4): if not self.supports_dduf: return from huggingface_hub import export_folder_as_dduf components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device="cpu") inputs.pop("generator") inputs["generator"] = torch.manual_seed(0) pipeline_out = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: dduf_filename = os.path.join(tmpdir, f"{pipe.__class__.__name__.lower()}.dduf") pipe.save_pretrained(tmpdir, safe_serialization=True) export_folder_as_dduf(dduf_filename, folder_path=tmpdir) loaded_pipe = self.pipeline_class.from_pretrained(tmpdir, dduf_file=dduf_filename).to(torch_device) inputs["generator"] = torch.manual_seed(0) loaded_pipeline_out = loaded_pipe(**inputs)[0] if isinstance(pipeline_out, np.ndarray) and isinstance(loaded_pipeline_out, np.ndarray): assert np.allclose(pipeline_out, loaded_pipeline_out, atol=atol, rtol=rtol) elif isinstance(pipeline_out, torch.Tensor) and isinstance(loaded_pipeline_out, torch.Tensor): assert torch.allclose(pipeline_out, loaded_pipeline_out, atol=atol, rtol=rtol) def test_layerwise_casting_inference(self): if not self.test_layerwise_casting: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device, dtype=torch.bfloat16) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet denoiser.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16) inputs = self.get_dummy_inputs(torch_device) _ = pipe(**inputs)[0] @is_staging_test class PipelinePushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-pipeline-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def get_pipeline_components(self): unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37,
layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) with tempfile.TemporaryDirectory() as tmpdir: dummy_vocab = {"<|startoftext|>": 0, "<|endoftext|>": 1, "!": 2} vocab_path = os.path.join(tmpdir, "vocab.json") with open(vocab_path, "w") as f: json.dump(dummy_vocab, f) merges = "Ġ t\nĠt h" merges_path = os.path.join(tmpdir, "merges.txt") with open(merges_path, "w") as f: f.writelines(merges) tokenizer = CLIPTokenizer(vocab_file=vocab_path, merges_file=merges_path) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def test_push_to_hub(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") unet = components["unet"] for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: pipeline.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") unet = components["unet"] for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: pipeline.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN) @unittest.skipIf( not is_jinja_available(), reason="Model card tests cannot be performed without Jinja installed.", ) def test_push_to_hub_library_name(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.repo_id, token=TOKEN) model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data assert model_card.library_name == "diffusers" # Reset repo delete_repo(self.repo_id, token=TOKEN) # For SDXL and its derivative pipelines (such as ControlNet), we have the text encoders # and the tokenizers as optional components. So, we need to override the `test_save_load_optional_components()` # test for all such pipelines. This requires us to use a custom `encode_prompt()` function. 
class SDXLOptionalComponentsTesterMixin: def encode_prompt( self, tokenizers, text_encoders, prompt: str, num_images_per_prompt: int = 1, negative_prompt: str = None ): device = text_encoders[0].device if isinstance(prompt, str): prompt = [prompt] batch_size = len(prompt) prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) if negative_prompt is None: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) else: negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # for classifier-free guidance # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) # for classifier-free guidance negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds def _test_save_load_optional_components(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) tokenizer = components.pop("tokenizer") tokenizer_2 = components.pop("tokenizer_2") text_encoder = components.pop("text_encoder") text_encoder_2 = components.pop("text_encoder_2") tokenizers = [tokenizer, tokenizer_2] if tokenizer is not None else [tokenizer_2] text_encoders = [text_encoder, text_encoder_2] if text_encoder is not None 
else [text_encoder_2] prompt = inputs.pop("prompt") ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt(tokenizers, text_encoders, prompt) inputs["prompt_embeds"] = prompt_embeds inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["pooled_prompt_embeds"] = pooled_prompt_embeds inputs["negative_pooled_prompt_embeds"] = negative_pooled_prompt_embeds output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) _ = inputs.pop("prompt") inputs["prompt_embeds"] = prompt_embeds inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["pooled_prompt_embeds"] = pooled_prompt_embeds inputs["negative_pooled_prompt_embeds"] = negative_pooled_prompt_embeds output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) class PyramidAttentionBroadcastTesterMixin: pab_config = PyramidAttentionBroadcastConfig( spatial_attention_block_skip_range=2, spatial_attention_timestep_skip_range=(100, 800), spatial_attention_block_identifiers=["transformer_blocks"], ) def test_pyramid_attention_broadcast_layers(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator num_layers = 0 num_single_layers = 0 dummy_component_kwargs = {} dummy_component_parameters = inspect.signature(self.get_dummy_components).parameters if "num_layers" in dummy_component_parameters: num_layers = 2 dummy_component_kwargs["num_layers"] = num_layers if "num_single_layers" in dummy_component_parameters: num_single_layers = 2 dummy_component_kwargs["num_single_layers"] = num_single_layers components = self.get_dummy_components(**dummy_component_kwargs) pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) self.pab_config.current_timestep_callback = lambda: pipe.current_timestep denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet denoiser.enable_cache(self.pab_config) expected_hooks = 0 if self.pab_config.spatial_attention_block_skip_range is not None: expected_hooks += num_layers + num_single_layers if self.pab_config.temporal_attention_block_skip_range is not None: expected_hooks += num_layers + num_single_layers if self.pab_config.cross_attention_block_skip_range is not None: expected_hooks += num_layers + num_single_layers denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet count = 0 for module in denoiser.modules(): if hasattr(module, "_diffusers_hook"): hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast") if hook is None: continue count += 1 self.assertTrue( isinstance(hook, PyramidAttentionBroadcastHook), "Hook should be of type PyramidAttentionBroadcastHook.", ) self.assertTrue(hook.state.cache is None, "Cache should be None at initialization.") self.assertEqual(count, expected_hooks, "Number of hooks should match the expected number.") # Perform dummy inference step to ensure state is updated def 
pab_state_check_callback(pipe, i, t, kwargs): for module in denoiser.modules(): if hasattr(module, "_diffusers_hook"): hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast") if hook is None: continue self.assertTrue( hook.state.cache is not None, "Cache should have updated during inference.", ) self.assertTrue( hook.state.iteration == i + 1, "Hook iteration state should have updated during inference.", ) return {} inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 2 inputs["callback_on_step_end"] = pab_state_check_callback pipe(**inputs)[0] # After inference, reset_stateful_hooks is called within the pipeline, which should have reset the states for module in denoiser.modules(): if hasattr(module, "_diffusers_hook"): hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast") if hook is None: continue self.assertTrue( hook.state.cache is None, "Cache should be reset to None after inference.", ) self.assertTrue( hook.state.iteration == 0, "Iteration should be reset to 0 after inference.", ) def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2): # We need to use higher tolerance because we are using a random model. With a converged/trained # model, the tolerance can be lower. device = "cpu" # ensure determinism for the device-dependent torch.Generator num_layers = 2 components = self.get_dummy_components(num_layers=num_layers) pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) # Run inference without PAB inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 4 output = pipe(**inputs)[0] original_image_slice = output.flatten() original_image_slice = np.concatenate((original_image_slice[:8], original_image_slice[-8:])) # Run inference with PAB enabled self.pab_config.current_timestep_callback = lambda: pipe.current_timestep denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet denoiser.enable_cache(self.pab_config) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 4 output = pipe(**inputs)[0] image_slice_pab_enabled = output.flatten() image_slice_pab_enabled = np.concatenate((image_slice_pab_enabled[:8], image_slice_pab_enabled[-8:])) # Run inference with PAB disabled denoiser.disable_cache() inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 4 output = pipe(**inputs)[0] image_slice_pab_disabled = output.flatten() image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:])) assert np.allclose( original_image_slice, image_slice_pab_enabled, atol=expected_atol ), "PAB outputs should not differ much in specified timestep range." assert np.allclose( original_image_slice, image_slice_pab_disabled, atol=1e-4 ), "Outputs from normal inference and after disabling cache should not differ." # Some models (e.g. unCLIP) are extremely likely to significantly deviate depending on which hardware is used. # This helper function is used to check that the image doesn't deviate on average more than 10 pixels from a # reference image. def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10): image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32) expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32) avg_diff = np.abs(image - expected_image).mean() assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"
diffusers/tests/pipelines/test_pipelines_common.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipelines_common.py", "repo_id": "diffusers", "token_count": 48609 }
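The common pipeline tests above exercise the `callback_on_step_end` hook and its `callback_on_step_end_tensor_inputs` allow-list. A minimal usage sketch of that hook outside the test harness follows; the checkpoint id and the CUDA device are illustrative assumptions, not requirements of the test suite.

# Minimal sketch: zero the latents on the last denoising step via callback_on_step_end.
# Assumes the "stable-diffusion-v1-5/stable-diffusion-v1-5" checkpoint and a CUDA device.
import torch
from diffusers import StableDiffusionPipeline

def zero_last_latents(pipe, step_index, timestep, callback_kwargs):
    # Only tensors listed in `callback_on_step_end_tensor_inputs` are available here.
    if step_index == pipe.num_timesteps - 1:
        callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
    return callback_kwargs

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=20,
    callback_on_step_end=zero_last_latents,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]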
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenPriorPipeline from diffusers.pipelines.wuerstchen import WuerstchenPrior from diffusers.utils.import_utils import is_peft_available from diffusers.utils.testing_utils import enable_full_determinism, require_peft_backend, skip_mps, torch_device if is_peft_available(): from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WuerstchenPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenPriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2, } model = WuerstchenPrior(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer scheduler = DDPMWuerstchenScheduler() components = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeddings image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, 0, 0, -10:] 
image_from_tuple_slice = image_from_tuple[0, 0, 0, -10:] assert image.shape == (1, 2, 24, 24) expected_slice = np.array( [ -7172.837, -3438.855, -1093.312, 388.8835, -7471.467, -7998.1206, -5328.259, 218.00089, -2731.5745, -8056.734, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=3e-1, ) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="flaky for now") def test_float16_inference(self): super().test_float16_inference() # override because we need to make sure latent_mean and latent_std to be 0 def test_callback_inputs(self): components = self.get_dummy_components() components["latent_mean"] = 0 components["latent_std"] = 0 pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 def check_if_lora_correctly_set(self, model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False def get_lora_components(self): prior = self.dummy_prior prior_lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False ) return prior, prior_lora_config @require_peft_backend def test_inference_with_prior_lora(self): _, prior_lora_config = self.get_lora_components() device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output_no_lora = pipe(**self.get_dummy_inputs(device)) image_embed = output_no_lora.image_embeddings self.assertTrue(image_embed.shape == (1, 2, 24, 24)) pipe.prior.add_adapter(prior_lora_config) self.assertTrue(self.check_if_lora_correctly_set(pipe.prior), "Lora not correctly set in prior") output_lora = pipe(**self.get_dummy_inputs(device)) lora_image_embed = output_lora.image_embeddings self.assertTrue(image_embed.shape == lora_image_embed.shape)
diffusers/tests/pipelines/wuerstchen/test_wuerstchen_prior.py/0
{ "file_path": "diffusers/tests/pipelines/wuerstchen/test_wuerstchen_prior.py", "repo_id": "diffusers", "token_count": 3942 }
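The Wuerstchen prior tests above attach a LoRA adapter to the prior through the peft backend. A minimal sketch of that pattern, assuming `peft` is installed; the prior hyperparameters are the same tiny dummy values used in `dummy_prior`.

# Minimal sketch: inject a LoRA adapter into a (tiny, untrained) Wuerstchen prior with peft.
from peft import LoraConfig
from peft.tuners.tuners_utils import BaseTunerLayer

from diffusers.pipelines.wuerstchen import WuerstchenPrior

prior = WuerstchenPrior(c_in=2, c=8, depth=2, c_cond=32, c_r=8, nhead=2)
lora_config = LoraConfig(
    r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False
)
prior.add_adapter(lora_config)  # wraps the attention projections with LoRA layers in-place

# The wrapped projections are now peft tuner layers, which is what the test asserts.
assert any(isinstance(m, BaseTunerLayer) for m in prior.modules())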
import torch from diffusers import SASolverScheduler from diffusers.utils.testing_utils import require_torchsde, torch_device from .test_schedulers import SchedulerCommonTest @require_torchsde class SASolverSchedulerTest(SchedulerCommonTest): scheduler_classes = (SASolverScheduler,) forward_default_kwargs = (("num_inference_steps", 10),) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[ : max( scheduler.config.predictor_order, scheduler.config.corrector_order - 1, ) ] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) generator = torch.manual_seed(0) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t, generator=generator) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 337.394287109375) < 1e-2 assert abs(result_mean.item() - 0.43931546807289124) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 329.1999816894531) < 1e-2 assert abs(result_mean.item() - 0.4286458194255829) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() 
sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) generator = torch.manual_seed(0) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t, generator=generator) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 193.1467742919922) < 1e-2 assert abs(result_mean.item() - 0.2514931857585907) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 193.4154052734375) < 1e-2 assert abs(result_mean.item() - 0.2518429756164551) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma generator = torch.manual_seed(0) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 337.394287109375) < 1e-2 assert abs(result_mean.item() - 0.43931546807289124) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 337.394287109375) < 1e-2 assert abs(result_mean.item() - 0.4393154978752136) < 1e-3 def test_full_loop_device_karras_sigmas(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma sample = sample.to(torch_device) generator = torch.manual_seed(0) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 837.2554931640625) < 1e-2 assert abs(result_mean.item() - 1.0901764631271362) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 837.25537109375) < 1e-2 assert abs(result_mean.item() - 1.0901763439178467) < 1e-2 def test_beta_sigmas(self): self.check_over_configs(use_beta_sigmas=True) def test_exponential_sigmas(self): self.check_over_configs(use_exponential_sigmas=True)
diffusers/tests/schedulers/test_scheduler_sasolver.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_sasolver.py", "repo_id": "diffusers", "token_count": 3611 }
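The SA-Solver tests above all drive the same scheduler loop: `set_timesteps`, `scale_model_input`, then `step`. A minimal sketch of that loop with a stand-in denoiser function in place of a trained model (the real tests use `self.dummy_model()`); like the tests, it assumes `torchsde` is installed.

# Minimal sketch of the scheduler loop exercised by the tests above, with a fake denoiser.
import torch
from diffusers import SASolverScheduler

scheduler = SASolverScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)

def fake_model(x, t):
    # Stand-in for a denoiser: any tensor of the same shape exercises the scheduler API.
    return 0.1 * x

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = fake_model(model_input, t)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample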
import gc import tempfile import unittest import torch from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from .single_file_testing_utils import ( SDSingleFileTesterMixin, download_diffusers_config, download_original_config, download_single_file_checkpoint, ) enable_full_determinism() @slow @require_torch_accelerator class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionControlNetInpaintPipeline ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt" original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml" repo_id = "stable-diffusion-v1-5/stable-diffusion-inpainting" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self): control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) inputs = { "prompt": "bird", "image": image, "control_image": control_image, "mask_image": mask_image, "generator": torch.Generator(device="cpu").manual_seed(0), "num_inference_steps": 3, "output_type": "np", } return inputs def test_single_file_format_inference_is_same_as_pretrained(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet, safety_checker=None) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload(device=torch_device) pipe_sf = self.pipeline_class.from_single_file(self.ckpt_path, controlnet=controlnet, safety_checker=None) pipe_sf.unet.set_default_attn_processor() pipe_sf.enable_model_cpu_offload(device=torch_device) inputs = self.get_inputs() output = pipe(**inputs).images[0] inputs = self.get_inputs() output_sf = pipe_sf(**inputs).images[0] max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten()) assert max_diff < 2e-3 def test_single_file_components(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = self.pipeline_class.from_pretrained( self.repo_id, variant="fp16", safety_checker=None, controlnet=controlnet ) pipe_single_file = self.pipeline_class.from_single_file( self.ckpt_path, safety_checker=None, controlnet=controlnet, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_components_local_files_only(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None, controlnet=controlnet) with tempfile.TemporaryDirectory() as tmpdir: repo_id, weight_name = 
_extract_repo_id_and_weights_name(self.ckpt_path) local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) pipe_single_file = self.pipeline_class.from_single_file( local_ckpt_path, controlnet=controlnet, safety_checker=None, local_files_only=True ) super()._compare_component_configs(pipe, pipe_single_file) @unittest.skip("runwayml original config repo does not exist") def test_single_file_components_with_original_config(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) pipe_single_file = self.pipeline_class.from_single_file( self.ckpt_path, controlnet=controlnet, original_config=self.original_config ) super()._compare_component_configs(pipe, pipe_single_file) @unittest.skip("runwayml original config repo does not exist") def test_single_file_components_with_original_config_local_files_only(self): controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" ) pipe = self.pipeline_class.from_pretrained( self.repo_id, controlnet=controlnet, safety_checker=None, ) with tempfile.TemporaryDirectory() as tmpdir: repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) local_original_config = download_original_config(self.original_config, tmpdir) pipe_single_file = self.pipeline_class.from_single_file( local_ckpt_path, original_config=local_original_config, controlnet=controlnet, safety_checker=None, local_files_only=True, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_components_with_diffusers_config(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) pipe_single_file = self.pipeline_class.from_single_file( self.ckpt_path, controlnet=controlnet, config=self.repo_id, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_components_with_diffusers_config_local_files_only(self): controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16", ) pipe = self.pipeline_class.from_pretrained( self.repo_id, controlnet=controlnet, safety_checker=None, ) with tempfile.TemporaryDirectory() as tmpdir: repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) pipe_single_file = self.pipeline_class.from_single_file( local_ckpt_path, config=local_diffusers_config, controlnet=controlnet, safety_checker=None, local_files_only=True, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_setting_pipeline_dtype_to_fp16(self): controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" ) single_file_pipe = self.pipeline_class.from_single_file( self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 ) super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe)
diffusers/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py", "repo_id": "diffusers", "token_count": 3691 }
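The single-file tests above compare `from_single_file` against `from_pretrained` for the ControlNet inpaint pipeline. A minimal sketch of the two loading paths, using the same checkpoint URL and repo id as the tests; both downloads are several GB, so this is illustrative only.

# Minimal sketch: load the same pipeline from a single .ckpt file and from the diffusers repo.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16)

# Original single-file checkpoint (the URL used as `ckpt_path` in the tests above).
pipe_single_file = StableDiffusionControlNetInpaintPipeline.from_single_file(
    "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt",
    controlnet=controlnet,
    torch_dtype=torch.float16,
)

# Equivalent diffusers-format repository (`repo_id` in the tests above).
pipe_pretrained = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting",
    controlnet=controlnet,
    torch_dtype=torch.float16,
)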
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect import os import re import warnings from collections import OrderedDict from difflib import get_close_matches from pathlib import Path from diffusers.models.auto import get_values from diffusers.utils import ENV_VARS_TRUE_VALUES, is_flax_available, is_torch_available # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_repo.py PATH_TO_DIFFUSERS = "src/diffusers" PATH_TO_TESTS = "tests" PATH_TO_DOC = "docs/source/en" # Update this list with models that are supposed to be private. PRIVATE_MODELS = [ "DPRSpanPredictor", "RealmBertModel", "T5Stack", "TFDPRSpanPredictor", ] # Update this list for models that are not tested with a comment explaining the reason it should not be. # Being in this list is an exception and should **not** be the rule. IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [ # models to ignore for not tested "OPTDecoder", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. "PLBartEncoder", # Building part of bigger (tested) model. "PLBartDecoder", # Building part of bigger (tested) model. "PLBartDecoderWrapper", # Building part of bigger (tested) model. "BigBirdPegasusEncoder", # Building part of bigger (tested) model. "BigBirdPegasusDecoder", # Building part of bigger (tested) model. "BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model. "DetrEncoder", # Building part of bigger (tested) model. "DetrDecoder", # Building part of bigger (tested) model. "DetrDecoderWrapper", # Building part of bigger (tested) model. "M2M100Encoder", # Building part of bigger (tested) model. "M2M100Decoder", # Building part of bigger (tested) model. "Speech2TextEncoder", # Building part of bigger (tested) model. "Speech2TextDecoder", # Building part of bigger (tested) model. "LEDEncoder", # Building part of bigger (tested) model. "LEDDecoder", # Building part of bigger (tested) model. "BartDecoderWrapper", # Building part of bigger (tested) model. "BartEncoder", # Building part of bigger (tested) model. "BertLMHeadModel", # Needs to be setup as decoder. "BlenderbotSmallEncoder", # Building part of bigger (tested) model. "BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model. "BlenderbotEncoder", # Building part of bigger (tested) model. "BlenderbotDecoderWrapper", # Building part of bigger (tested) model. "MBartEncoder", # Building part of bigger (tested) model. "MBartDecoderWrapper", # Building part of bigger (tested) model. "MegatronBertLMHeadModel", # Building part of bigger (tested) model. "MegatronBertEncoder", # Building part of bigger (tested) model. "MegatronBertDecoder", # Building part of bigger (tested) model. "MegatronBertDecoderWrapper", # Building part of bigger (tested) model. 
"PegasusEncoder", # Building part of bigger (tested) model. "PegasusDecoderWrapper", # Building part of bigger (tested) model. "DPREncoder", # Building part of bigger (tested) model. "ProphetNetDecoderWrapper", # Building part of bigger (tested) model. "RealmBertModel", # Building part of bigger (tested) model. "RealmReader", # Not regular model. "RealmScorer", # Not regular model. "RealmForOpenQA", # Not regular model. "ReformerForMaskedLM", # Needs to be setup as decoder. "Speech2Text2DecoderWrapper", # Building part of bigger (tested) model. "TFDPREncoder", # Building part of bigger (tested) model. "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFModelMixin ?) "TFRobertaForMultipleChoice", # TODO: fix "TrOCRDecoderWrapper", # Building part of bigger (tested) model. "SeparableConv1D", # Building part of bigger (tested) model. "FlaxBartForCausalLM", # Building part of bigger (tested) model. "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. "OPTDecoderWrapper", ] # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. TEST_FILES_WITH_NO_COMMON_TESTS = [ "models/decision_transformer/test_modeling_decision_transformer.py", "models/camembert/test_modeling_camembert.py", "models/mt5/test_modeling_flax_mt5.py", "models/mbart/test_modeling_mbart.py", "models/mt5/test_modeling_mt5.py", "models/pegasus/test_modeling_pegasus.py", "models/camembert/test_modeling_tf_camembert.py", "models/mt5/test_modeling_tf_mt5.py", "models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", "models/xlm_roberta/test_modeling_xlm_roberta.py", "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", "models/decision_transformer/test_modeling_decision_transformer.py", ] # Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and # should **not** be the rule. 
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", "ViltForQuestionAnswering", "ViltForImagesAndTextClassification", "ViltForImageAndTextRetrieval", "ViltForMaskedLM", "XGLMEncoder", "XGLMDecoder", "XGLMDecoderWrapper", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "SegformerDecodeHead", "FlaxBeitForMaskedImageModeling", "PLBartEncoder", "PLBartDecoder", "PLBartDecoderWrapper", "BeitForMaskedImageModeling", "CLIPTextModel", "CLIPVisionModel", "TFCLIPTextModel", "TFCLIPVisionModel", "FlaxCLIPTextModel", "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", "DetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", "FlavaImageCodebook", "FlavaTextModel", "FlavaImageModel", "FlavaMultimodalModel", "GPT2DoubleHeadsModel", "LukeForMaskedLM", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "OpenAIGPTDoubleHeadsModel", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmEmbedder", "RealmForOpenQA", "RealmScorer", "RealmReader", "TFDPRReader", "TFGPT2DoubleHeadsModel", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "Wav2Vec2ForCTC", "HubertForCTC", "SEWForCTC", "SEWDForCTC", "XLMForQuestionAnswering", "XLNetForQuestionAnswering", "SeparableConv1D", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", "VisualBertForQuestionAnswering", "VisualBertForMultipleChoice", "TFWav2Vec2ForCTC", "TFHubertForCTC", "MaskFormerForInstanceSegmentation", ] # Update this list for models that have multiple model types for the same # model doc MODEL_TYPE_TO_DOC_MAPPING = OrderedDict( [ ("data2vec-text", "data2vec"), ("data2vec-audio", "data2vec"), ("data2vec-vision", "data2vec"), ] ) # This is to make sure the transformers module imported is the one in the repo. spec = importlib.util.spec_from_file_location( "diffusers", os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), submodule_search_locations=[PATH_TO_DIFFUSERS], ) diffusers = spec.loader.load_module() def check_model_list(): """Check the model list inside the transformers library.""" # Get the models from the directory structure of `src/diffusers/models/` models_dir = os.path.join(PATH_TO_DIFFUSERS, "models") _models = [] for model in os.listdir(models_dir): model_dir = os.path.join(models_dir, model) if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): _models.append(model) # Get the models from the directory structure of `src/transformers/models/` models = [model for model in dir(diffusers.models) if not model.startswith("__")] missing_models = sorted(set(_models).difference(models)) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." ) # If some modeling modules should be ignored for all checks, they should be added in the nested list # _ignore_modules of this function. 
def get_model_modules(): """Get the model modules inside the transformers library.""" _ignore_modules = [ "modeling_auto", "modeling_encoder_decoder", "modeling_marian", "modeling_mmbt", "modeling_outputs", "modeling_retribert", "modeling_utils", "modeling_flax_auto", "modeling_flax_encoder_decoder", "modeling_flax_utils", "modeling_speech_encoder_decoder", "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_transfo_xl_utilities", "modeling_tf_auto", "modeling_tf_encoder_decoder", "modeling_tf_outputs", "modeling_tf_pytorch_utils", "modeling_tf_utils", "modeling_tf_transfo_xl_utilities", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] modules = [] for model in dir(diffusers.models): # There are some magic dunder attributes in the dir, we ignore them if not model.startswith("__"): model_module = getattr(diffusers.models, model) for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) if inspect.ismodule(modeling_module): modules.append(modeling_module) return modules def get_models(module, include_pretrained=False): """Get the objects in module that are models.""" models = [] model_classes = (diffusers.ModelMixin, diffusers.TFModelMixin, diffusers.FlaxModelMixin) for attr_name in dir(module): if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name): continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: models.append((attr_name, attr)) return models def is_a_private_model(model): """Returns True if the model should not be in the main init.""" if model in PRIVATE_MODELS: return True # Wrapper, Encoder and Decoder are all privates if model.endswith("Wrapper"): return True if model.endswith("Encoder"): return True if model.endswith("Decoder"): return True return False def check_models_are_in_init(): """Checks all models defined in the library are in the main init.""" models_not_in_init = [] dir_transformers = dir(diffusers) for module in get_model_modules(): models_not_in_init += [ model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers ] # Remove private models models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] if len(models_not_in_init) > 0: raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") # If some test_modeling files should be ignored when checking models are all tested, they should be added in the # nested list _ignore_files of this function. def get_model_test_files(): """Get the model test files. The returned files should NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files. 
""" _ignore_files = [ "test_modeling_common", "test_modeling_encoder_decoder", "test_modeling_flax_encoder_decoder", "test_modeling_flax_speech_encoder_decoder", "test_modeling_marian", "test_modeling_tf_common", "test_modeling_tf_encoder_decoder", ] test_files = [] # Check both `PATH_TO_TESTS` and `PATH_TO_TESTS/models` model_test_root = os.path.join(PATH_TO_TESTS, "models") model_test_dirs = [] for x in os.listdir(model_test_root): x = os.path.join(model_test_root, x) if os.path.isdir(x): model_test_dirs.append(x) for target_dir in [PATH_TO_TESTS] + model_test_dirs: for file_or_dir in os.listdir(target_dir): path = os.path.join(target_dir, file_or_dir) if os.path.isfile(path): filename = os.path.split(path)[-1] if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files: file = os.path.join(*path.split(os.sep)[1:]) test_files.append(file) return test_files # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class # for the all_model_classes variable. def find_tested_models(test_file): """Parse the content of test_file to detect what's in all_model_classes""" # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: content = f.read() all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) # Check with one less parenthesis as well all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) if len(all_models) > 0: model_tested = [] for entry in all_models: for line in entry.split(","): name = line.strip() if len(name) > 0: model_tested.append(name) return model_tested def check_models_are_tested(module, test_file): """Check models defined in module are tested in test_file.""" # XxxModelMixin are not tested defined_models = get_models(module) tested_models = find_tested_models(test_file) if tested_models is None: if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS: return return [ f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + "`utils/check_repo.py`." ] failures = [] for model_name, _ in defined_models: if model_name not in tested_models and model_name not in IGNORE_NON_TESTED: failures.append( f"{model_name} is defined in {module.__name__} but is not tested in " + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + "in the file `utils/check_repo.py`." 
) return failures def check_all_models_are_tested(): """Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] if len(test_file) == 0: failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") elif len(test_file) > 1: failures.append(f"{module.__name__} has several test files: {test_file}.") else: test_file = test_file[0] new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def get_all_auto_configured_models(): """Return the list of all models in at least one auto class.""" result = set() # To avoid duplicates we concatenate all model classes in a set. if is_torch_available(): for attr_name in dir(diffusers.models.auto.modeling_auto): if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(diffusers.models.auto.modeling_auto, attr_name))) if is_flax_available(): for attr_name in dir(diffusers.models.auto.modeling_flax_auto): if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(diffusers.models.auto.modeling_flax_auto, attr_name))) return list(result) def ignore_unautoclassed(model_name): """Rules to determine if `name` should be in an auto class.""" # Special white list if model_name in IGNORE_NON_AUTO_CONFIGURED: return True # Encoder and Decoder should be ignored if "Encoder" in model_name or "Decoder" in model_name: return True return False def check_models_are_auto_configured(module, all_auto_models): """Check models defined in module are each in an auto class.""" defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and not ignore_unautoclassed(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " "`utils/check_repo.py`." ) return failures def check_all_models_are_auto_configured(): """Check all models are each in an auto class.""" missing_backends = [] if not is_torch_available(): missing_backends.append("PyTorch") if not is_flax_available(): missing_backends.append("Flax") if len(missing_backends) > 0: missing = ", ".join(missing_backends) if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: raise Exception( "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " f"Transformers repo, the following are missing: {missing}." ) else: warnings.warn( "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " "didn't make any change in one of those backends modeling files, you should probably execute the " "command above to be on the safe side." 
) modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) _re_decorator = re.compile(r"^\s*@(\S+)\s+$") def check_decorator_order(filename): """Check that in the test file `filename` the slow decorator is always last.""" with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() decorator_before = None errors = [] for i, line in enumerate(lines): search = _re_decorator.search(line) if search is not None: decorator_name = search.groups()[0] if decorator_before is not None and decorator_name.startswith("parameterized"): errors.append(i) decorator_before = decorator_name elif decorator_before is not None: decorator_before = None return errors def check_all_decorator_order(): """Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( "The parameterized decorator (and its variants) should always be first, but this is not the case in the" f" following files:\n{msg}" ) def find_all_documented_objects(): """Parse the content of all doc files to detect which classes and functions it documents""" documented_obj = [] for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] return documented_obj # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. DEPRECATED_OBJECTS = [ "AutoModelWithLMHead", "BartPretrainedModel", "DataCollator", "DataCollatorForSOP", "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "PretrainedBartModel", "PretrainedFSMTModel", "SingleSentenceClassificationProcessor", "SquadDataTrainingArguments", "SquadDataset", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "TFAutoModelWithLMHead", "TFBartPretrainedModel", "TextDataset", "TextDatasetForNextSentencePrediction", "Wav2Vec2ForMaskedLM", "Wav2Vec2Tokenizer", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", "TFTrainer", "TFTrainingArguments", ] # Exceptionally, some objects should not be documented after all rules passed. # ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! UNDOCUMENTED_OBJECTS = [ "AddedToken", # This is a tokenizers class. "BasicTokenizer", # Internal, should never have been in the main init. "CharacterTokenizer", # Internal, should never have been in the main init. 
"DPRPretrainedReader", # Like an Encoder. "DummyObject", # Just picked by mistake sometimes. "MecabTokenizer", # Internal, should never have been in the main init. "ModelCard", # Internal type. "SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) "TFDPRPretrainedReader", # Like an Encoder. "TransfoXLCorpus", # Internal type. "WordpieceTokenizer", # Internal, should never have been in the main init. "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. "cached_path", # Internal used for downloading models. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module "requires_backends", # Internal function ] # This list should be empty. Objects in it should get their own doc page. SHOULD_HAVE_THEIR_OWN_PAGE = [ # Benchmarks "PyTorchBenchmark", "PyTorchBenchmarkArguments", "TensorFlowBenchmark", "TensorFlowBenchmarkArguments", ] def ignore_undocumented(name): """Rules to determine if `name` should be undocumented.""" # NOT DOCUMENTED ON PURPOSE. # Constants uppercase are not documented. if name.isupper(): return True # ModelMixins / Encoders / Decoders / Layers / Embeddings / Attention are not documented. if ( name.endswith("ModelMixin") or name.endswith("Decoder") or name.endswith("Encoder") or name.endswith("Layer") or name.endswith("Embeddings") or name.endswith("Attention") ): return True # Submodules are not documented. if os.path.isdir(os.path.join(PATH_TO_DIFFUSERS, name)) or os.path.isfile( os.path.join(PATH_TO_DIFFUSERS, f"{name}.py") ): return True # All load functions are not documented. if name.startswith("load_tf") or name.startswith("load_pytorch"): return True # is_xxx_available functions are not documented. if name.startswith("is_") and name.endswith("_available"): return True # Deprecated objects are not documented. if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: return True # MMBT model does not really work. if name.startswith("MMBT"): return True if name in SHOULD_HAVE_THEIR_OWN_PAGE: return True return False def check_all_objects_are_documented(): """Check all models are properly documented.""" documented_objs = find_all_documented_objects() modules = diffusers._modules objects = [c for c in dir(diffusers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) check_docstrings_are_in_md() check_model_type_doc_match() def check_model_type_doc_match(): """Check all doc pages have a corresponding model type.""" model_doc_folder = Path(PATH_TO_DOC) / "model_doc" model_docs = [m.stem for m in model_doc_folder.glob("*.md")] model_types = list(diffusers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys()) model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types] errors = [] for m in model_docs: if m not in model_types and m != "auto": close_matches = get_close_matches(m, model_types) error_message = f"{m} is not a proper model identifier." if len(close_matches) > 0: close_matches = "/".join(close_matches) error_message += f" Did you mean {close_matches}?" 
errors.append(error_message) if len(errors) > 0: raise ValueError( "Some model doc pages do not match any existing model type:\n" + "\n".join(errors) + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in " "models/auto/configuration_auto.py." ) # Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`. _re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`") # Re pattern to catch things between double backquotes. _re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)") # Re pattern to catch example introduction. _re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE) def is_rst_docstring(docstring): """ Returns `True` if `docstring` is written in rst. """ if _re_rst_special_words.search(docstring) is not None: return True if _re_double_backquotes.search(docstring) is not None: return True if _re_rst_example.search(docstring) is not None: return True return False def check_docstrings_are_in_md(): """Check all docstrings are in md""" files_with_rst = [] for file in Path(PATH_TO_DIFFUSERS).glob("**/*.py"): with open(file, "r") as f: code = f.read() docstrings = code.split('"""') for idx, docstring in enumerate(docstrings): if idx % 2 == 0 or not is_rst_docstring(docstring): continue files_with_rst.append(file) break if len(files_with_rst) > 0: raise ValueError( "The following files have docstrings written in rst:\n" + "\n".join([f"- {f}" for f in files_with_rst]) + "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n" "(`pip install git+https://github.com/huggingface/doc-builder`)" ) def check_repo_quality(): """Check all models are properly tested and documented.""" print("Checking all models are included.") check_model_list() print("Checking all models are public.") check_models_are_in_init() print("Checking all models are properly tested.") check_all_decorator_order() check_all_models_are_tested() print("Checking all objects are properly documented.") check_all_objects_are_documented() print("Checking all models are in at least one auto class.") check_all_models_are_auto_configured() if __name__ == "__main__": check_repo_quality()
diffusers/utils/check_repo.py/0
{ "file_path": "diffusers/utils/check_repo.py", "repo_id": "diffusers", "token_count": 12370 }
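The `check_docstrings_are_in_md` check in `check_repo.py` above works by splitting each source file on triple quotes and testing every odd-indexed chunk (the docstring bodies) against three rst patterns. A minimal, self-contained sketch of that detection logic, re-implemented here for illustration rather than imported from the script:

```python
import re

# Same three patterns as in check_repo.py: rst roles, double backquotes, rst example blocks.
_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`")
_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)")
_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE)


def is_rst_docstring(docstring: str) -> bool:
    """Return True if the docstring still uses rst markup instead of markdown."""
    return any(
        pattern.search(docstring) is not None
        for pattern in (_re_rst_special_words, _re_double_backquotes, _re_rst_example)
    )


# Odd-indexed chunks of `code.split('"""')` are the docstring bodies.
code = 'def f():\n    """Returns :obj:`int`."""\n    return 1\n'
docstrings = code.split('"""')
print([d for i, d in enumerate(docstrings) if i % 2 == 1 and is_rst_docstring(d)])
# -> ['Returns :obj:`int`.']
```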
# Video benchmark

## Questions
What is the optimal trade-off between:
- minimizing loading time with random access,
- minimizing memory space on disk,
- maximizing success rate of policies,
- compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).

How to encode videos?
- Which video codec (`-vcodec`) to use? h264, h265, AV1?
- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
- How much compression (`-crf`)? No compression with `0`, intermediate compression with `25` or extreme with `50+`?
- Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?

How to decode videos?
- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
- Which scenarios to use for requesting timestamps during the benchmark? (`timestamps_mode`)

## Variables
**Image content & size**
We don't expect the same optimal settings for a dataset of images from a simulation, or from the real world in an apartment, in a factory, or outdoors, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
For these reasons, we run this benchmark on four representative datasets:
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.

Note: The datasets used for this benchmark need to be image datasets, not video datasets.

**Data augmentations**
We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).

### Encoding parameters
| parameter   | values                                                       |
|-------------|--------------------------------------------------------------|
| **vcodec**  | `libx264`, `libx265`, `libsvtav1`                            |
| **pix_fmt** | `yuv444p`, `yuv420p`                                         |
| **g**       | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
| **crf**     | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None`   |

Note that the `crf` value might be interpreted differently by various video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec. In fact, the default value (`None`) isn't the same amongst the different video codecs. Importantly, this is also the case for many other ffmpeg arguments like `g`, which specifies the frequency of the key frames.

For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used:
- h264: https://trac.ffmpeg.org/wiki/Encode/H.264
- h265: https://trac.ffmpeg.org/wiki/Encode/H.265
- AV1: https://trac.ffmpeg.org/wiki/Encode/AV1

### Decoding parameters
**Decoder**
We tested two video decoding backends from torchvision:
- `pyav` (default)
- `video_reader` (requires building torchvision from source)

**Requested timestamps**
Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast. This of course is affected by the `-g` parameter during encoding, which specifies the frequency of the keyframes.
Given our typical use cases in robotics policies which might request a few timestamps in different random places, we want to replicate these use cases with the following scenarios: - `1_frame`: 1 frame, - `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`), - `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`) Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`. Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario: - `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`), However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded. ## Metrics **Data compression ratio (lower is better)** `video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images. **Loading time ratio (lower is better)** `video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at a given timestamps over the time it takes to load the exact same original images. Lower is better. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images. **Average Mean Square Error (lower is better)** `avg_mse` is the average mean square error between each decoded frame and its corresponding original image over all requested timestamps, and also divided by the number of pixels in the image to be comparable when switching to different image sizes. **Average Peak Signal to Noise Ratio (higher is better)** `avg_psnr` measures the ratio between the maximum possible power of a signal and the power of corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality. **Average Structural Similarity Index Measure (higher is better)** `avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity. One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes. h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility: - `yuv420p` is more widely supported across various platforms, including web browsers. - `yuv444p` offers higher color fidelity but might not be supported as broadly. <!-- **Loss of a pretrained policy (higher is better)** (not available) `loss_pretrained` is the result of evaluating with the selected encoding/decoding settings a policy pretrained on original images. It is easier to understand than `avg_l2_error`. **Success rate after retraining (higher is better)** (not available) `success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most difficult metric to get but also the very best. 
-->

## How the benchmark works
The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.

**Encoding:** for each `vcodec` and `pix_fmt` pair, we use a default value for `g` and `crf` upon which we change a single value (either `g` or `crf`) to one of the specified values (we don't test every combination of those as this would be computationally too heavy). This gives a unique set of encoding parameters which is used to encode the episode.

**Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`.

Intermediate results are saved for each `vcodec` and `pix_fmt` combination in CSV tables, which are then all concatenated into a single table ready for analysis.

## Caveats
We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.

Additional encoding parameters exist that are not included in this benchmark. In particular:
- `-preset`, which allows for selecting encoding presets. This represents a collection of options that will provide a certain trade-off between encoding speed and compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
- `-tune`, which allows optimizing the encoding for certain aspects (e.g. film quality, fast decoding, etc.).

See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.

Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
- `torchaudio`
- `ffmpegio`
- `decord`
- `nvc`

Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding. However, besides the necessity to build ffmpeg from source, encoding did not pose any issue and it didn't take a significant amount of time during this benchmark.

## Install
Building ffmpeg from source is required to include the libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).

**Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another version which is custom-built with all the video codecs for encoding. For the script to then use that version, you can prepend the command above with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built.

## Adding a video decoder
Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
You can easily add a new decoder to benchmark by adding it to this function in the script:

```diff
def decode_video_frames(
    video_path: str,
    timestamps: list[float],
    tolerance_s: float,
    backend: str,
) -> torch.Tensor:
    if backend in ["pyav", "video_reader"]:
        return decode_video_frames_torchvision(
            video_path, timestamps, tolerance_s, backend
        )
+   elif backend == "your_decoder":
+       return your_decoder_function(
+           video_path, timestamps, tolerance_s, backend
+       )
    else:
        raise NotImplementedError(backend)
```

## Example
For a quick run, you can try these parameters:
```bash
python benchmark/video/run_video_benchmark.py \
    --output-dir outputs/video_benchmark \
    --repo-ids \
        lerobot/pusht_image \
        aliberts/aloha_mobile_shrimp_image \
    --vcodec libx264 libx265 \
    --pix-fmt yuv444p yuv420p \
    --g 2 20 None \
    --crf 10 40 None \
    --timestamps-modes 1_frame 2_frames \
    --backends pyav video_reader \
    --num-samples 5 \
    --num-workers 5 \
    --save-frames 0
```

## Results

### Reproduce
We ran the benchmark with the following parameters:
```bash
# h264 and h265 encodings
python benchmark/video/run_video_benchmark.py \
    --output-dir outputs/video_benchmark \
    --repo-ids \
        lerobot/pusht_image \
        aliberts/aloha_mobile_shrimp_image \
        aliberts/paris_street \
        aliberts/kitchen \
    --vcodec libx264 libx265 \
    --pix-fmt yuv444p yuv420p \
    --g 1 2 3 4 5 6 10 15 20 40 None \
    --crf 0 5 10 15 20 25 30 40 50 None \
    --timestamps-modes 1_frame 2_frames 6_frames \
    --backends pyav video_reader \
    --num-samples 50 \
    --num-workers 5 \
    --save-frames 1

# av1 encoding (only compatible with yuv420p and pyav decoder)
python benchmark/video/run_video_benchmark.py \
    --output-dir outputs/video_benchmark \
    --repo-ids \
        lerobot/pusht_image \
        aliberts/aloha_mobile_shrimp_image \
        aliberts/paris_street \
        aliberts/kitchen \
    --vcodec libsvtav1 \
    --pix-fmt yuv420p \
    --g 1 2 3 4 5 6 10 15 20 40 None \
    --crf 0 5 10 15 20 25 30 40 50 None \
    --timestamps-modes 1_frame 2_frames 6_frames \
    --backends pyav \
    --num-samples 50 \
    --num-workers 5 \
    --save-frames 1
```

The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing).

### Parameters selected for LeRobotDataset
Considering these results, we chose what we think is the best set of encoding parameters:
- vcodec: `libsvtav1`
- pix-fmt: `yuv420p`
- g: `2`
- crf: `30`

Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`).
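These selected parameters map directly onto ffmpeg flags. For readers who want to reproduce a single encoding by hand outside the benchmark script, here is a minimal sketch using Python's `subprocess`; the paths and the frame filename pattern are placeholders, and the library's own encoding helper (`encode_video_frames`) is what actually gets used in practice:

```python
import subprocess


def encode_frames(imgs_dir: str, out_path: str, fps: int = 30) -> None:
    """Encode frame_000000.png, frame_000001.png, ... from imgs_dir into one video."""
    cmd = [
        "ffmpeg",
        "-f", "image2",
        "-r", str(fps),                      # input frame rate
        "-i", f"{imgs_dir}/frame_%06d.png",  # image sequence pattern (assumed naming)
        "-vcodec", "libsvtav1",              # or libx264 / libx265
        "-pix_fmt", "yuv420p",               # yuv444p is only tested with libx264 / libx265
        "-g", "2",                           # one key frame every 2 frames
        "-crf", "30",                        # compression level
        "-y", out_path,
    ]
    subprocess.run(cmd, check=True)


encode_frames("outputs/tmp_images", "outputs/episode_000000.mp4", fps=30)
```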
### Summary These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav` | video_images_size_ratio | vcodec | pix_fmt | | | | |------------------------------------|------------|---------|-----------|-----------|-----------| | | libx264 | | libx265 | | libsvtav1 | | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | | lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | | aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | | aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | | aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | | video_images_load_time_ratio | vcodec | pix_fmt | | | | |------------------------------------|---------|---------|----------|---------|-----------| | | libx264 | | libx265 | | libsvtav1 | | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | | lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | | aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | | aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | | aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | | | | vcodec | pix_fmt | | | | |------------------------------------|----------|----------|--------------|----------|-----------|--------------| | | | libx264 | | libx265 | | libsvtav1 | | repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | | lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | | | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | | | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | | aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | | | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | | | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | | aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | | | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | | | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | | aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | | | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | | | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** |
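The quality metrics reported in these tables (`avg_mse`, `avg_psnr`, `avg_ssim`) are standard image-comparison metrics and can be sanity-checked outside the benchmark script. Below is a small NumPy sketch of the MSE/PSNR part (SSIM is more involved and is usually computed with scikit-image); the normalization here is an assumption and may differ in detail from the benchmark's own implementation:

```python
import numpy as np


def mse_psnr(original: np.ndarray, decoded: np.ndarray, max_val: float = 1.0) -> tuple[float, float]:
    """Per-pixel mean squared error and the corresponding PSNR in dB.

    Both images are float arrays in [0, max_val] with identical shapes.
    """
    err = (original.astype(np.float64) - decoded.astype(np.float64)) ** 2
    mse = float(err.mean())  # mean() already divides by the number of pixels and channels
    psnr = float("inf") if mse == 0 else 10 * float(np.log10(max_val**2 / mse))
    return mse, psnr


rng = np.random.default_rng(0)
frame = rng.random((480, 640, 3))
decoded = np.clip(frame + rng.normal(scale=0.01, size=frame.shape), 0.0, 1.0)
print(mse_psnr(frame, decoded))  # small MSE, PSNR around 40 dB
```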
lerobot/benchmarks/video/README.md/0
{ "file_path": "lerobot/benchmarks/video/README.md", "repo_id": "lerobot", "token_count": 6162 }
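As a concrete reading of the `timestamps_mode` scenarios defined in the benchmark README above, a hypothetical helper (not part of the benchmark script) could build the list of requested timestamps for a start time `t` and frame rate `fps`:

```python
def requested_timestamps(mode: str, t: float, fps: int) -> list[float]:
    """Timestamps requested from the decoder for one sample, per benchmark scenario."""
    if mode == "1_frame":
        return [t]
    if mode == "2_frames":
        return [t, t + 1 / fps]
    if mode == "6_frames":
        return [t + i / fps for i in range(6)]
    if mode == "2_frames_4_space":
        return [t, t + 5 / fps]  # 4 frames of spacing in between
    raise ValueError(f"Unknown timestamps mode: {mode}")


print(requested_timestamps("6_frames", t=1.0, fps=10))
# [1.0, 1.1, 1.2, 1.3, 1.4, 1.5]
```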
"""This script demonstrates how to slice a dataset and calculate the loss on a subset of the data. This technique can be useful for debugging and testing purposes, as well as identifying whether a policy is learning effectively. Furthermore, relying on validation loss to evaluate performance is generally not considered a good practice, especially in the context of imitation learning. The most reliable approach is to evaluate the policy directly on the target environment, whether that be in simulation or the real world. """ import math import torch from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy def main(): device = torch.device("cuda") # Download the diffusion policy for pusht environment pretrained_policy_path = "lerobot/diffusion_pusht" # OR uncomment the following to evaluate a policy from the local outputs/train folder. # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") policy = DiffusionPolicy.from_pretrained(pretrained_policy_path) policy.eval() policy.to(device) # Set up the dataset. delta_timestamps = { # Load the previous image and state at -0.1 seconds before current frame, # then load current image and state corresponding to 0.0 second. "observation.image": [-0.1, 0.0], "observation.state": [-0.1, 0.0], # Load the previous action (-0.1), the next action to be executed (0.0), # and 14 future actions with a 0.1 seconds spacing. All these actions will be # used to calculate the loss. "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], } # Load the last 10% of episodes of the dataset as a validation set. # - Load dataset metadata dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") # - Calculate train and val episodes total_episodes = dataset_metadata.total_episodes episodes = list(range(dataset_metadata.total_episodes)) num_train_episodes = math.floor(total_episodes * 90 / 100) train_episodes = episodes[:num_train_episodes] val_episodes = episodes[num_train_episodes:] print(f"Number of episodes in full dataset: {total_episodes}") print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}") print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}") # - Load train an val datasets train_dataset = LeRobotDataset( "lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps ) val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps) print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}") print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}") # Create dataloader for evaluation. val_dataloader = torch.utils.data.DataLoader( val_dataset, num_workers=4, batch_size=64, shuffle=False, pin_memory=device != torch.device("cpu"), drop_last=False, ) # Run validation loop. loss_cumsum = 0 n_examples_evaluated = 0 for batch in val_dataloader: batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()} output_dict = policy.forward(batch) loss_cumsum += output_dict["loss"].item() n_examples_evaluated += batch["index"].shape[0] # Calculate the average loss over the validation set. average_loss = loss_cumsum / n_examples_evaluated print(f"Average loss on validation set: {average_loss:.4f}") if __name__ == "__main__": main()
lerobot/examples/advanced/2_calculate_validation_loss.py/0
{ "file_path": "lerobot/examples/advanced/2_calculate_validation_loss.py", "repo_id": "lerobot", "token_count": 1345 }
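One subtlety in the validation loop of the script above: assuming `output_dict["loss"]` is a per-batch mean (the usual convention), summing those means and then dividing by the number of examples shrinks the reported value by roughly the batch size. If a strict per-example average loss is wanted, a sketch of an adjusted loop is shown below; it reuses `policy`, `device` and `val_dataloader` from the script and is not taken from the repository:

```python
import torch

loss_weighted_sum = 0.0
n_examples_evaluated = 0
with torch.no_grad():  # gradients are not needed for validation
    for batch in val_dataloader:
        batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
        output_dict = policy.forward(batch)
        batch_size = batch["index"].shape[0]
        # Weight each per-batch mean loss by the number of examples in the batch.
        loss_weighted_sum += output_dict["loss"].item() * batch_size
        n_examples_evaluated += batch_size

print(f"Average per-example loss on validation set: {loss_weighted_sum / n_examples_evaluated:.4f}")
```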
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Process pickle files formatted like in: https://github.com/fyhMer/fowm""" import pickle import shutil from pathlib import Path import einops import torch import tqdm from datasets import Dataset, Features, Image, Sequence, Value from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import ( calculate_episode_data_index, concatenate_episodes, get_default_encoding, save_images_concurrently, ) from lerobot.common.datasets.utils import ( hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames def check_format(raw_dir): keys = {"actions", "rewards", "dones"} nested_keys = {"observations": {"rgb", "state"}, "next_observations": {"rgb", "state"}} xarm_files = list(raw_dir.glob("*.pkl")) assert len(xarm_files) > 0 with open(xarm_files[0], "rb") as f: dataset_dict = pickle.load(f) assert isinstance(dataset_dict, dict) assert all(k in dataset_dict for k in keys) # Check for consistent lengths in nested keys expected_len = len(dataset_dict["actions"]) assert all(len(dataset_dict[key]) == expected_len for key in keys if key in dataset_dict) for key, subkeys in nested_keys.items(): nested_dict = dataset_dict.get(key, {}) assert all(len(nested_dict[subkey]) == expected_len for subkey in subkeys if subkey in nested_dict) def load_from_raw( raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None = None, encoding: dict | None = None, ): pkl_path = raw_dir / "buffer.pkl" with open(pkl_path, "rb") as f: pkl_data = pickle.load(f) # load data indices from which each episode starts and ends from_ids, to_ids = [], [] from_idx, to_idx = 0, 0 for done in pkl_data["dones"]: to_idx += 1 if not done: continue from_ids.append(from_idx) to_ids.append(to_idx) from_idx = to_idx num_episodes = len(from_ids) ep_dicts = [] ep_ids = episodes if episodes else range(num_episodes) for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)): from_idx = from_ids[selected_ep_idx] to_idx = to_ids[selected_ep_idx] num_frames = to_idx - from_idx image = torch.tensor(pkl_data["observations"]["rgb"][from_idx:to_idx]) image = einops.rearrange(image, "b c h w -> b h w c") state = torch.tensor(pkl_data["observations"]["state"][from_idx:to_idx]) action = torch.tensor(pkl_data["actions"][from_idx:to_idx]) # TODO(rcadene): we have a missing last frame which is the observation when the env is done # it is critical to have this frame for tdmpc to predict a "done observation/state" # next_image = torch.tensor(pkl_data["next_observations"]["rgb"][from_idx:to_idx]) # next_state = torch.tensor(pkl_data["next_observations"]["state"][from_idx:to_idx]) next_reward = torch.tensor(pkl_data["rewards"][from_idx:to_idx]) next_done = torch.tensor(pkl_data["dones"][from_idx:to_idx]) ep_dict = {} imgs_array = [x.numpy() for x in image] img_key = 
"observation.image" if video: # save png images in temporary directory tmp_imgs_dir = videos_dir / "tmp_images" save_images_concurrently(imgs_array, tmp_imgs_dir) # encode images to a mp4 video fname = f"{img_key}_episode_{ep_idx:06d}.mp4" video_path = videos_dir / fname encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {})) # clean temporary images directory shutil.rmtree(tmp_imgs_dir) # store the reference to the video frame ep_dict[img_key] = [{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)] else: ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array] ep_dict["observation.state"] = state ep_dict["action"] = action ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64) ep_dict["frame_index"] = torch.arange(0, num_frames, 1) ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps # ep_dict["next.observation.image"] = next_image # ep_dict["next.observation.state"] = next_state ep_dict["next.reward"] = next_reward ep_dict["next.done"] = next_done ep_dicts.append(ep_dict) data_dict = concatenate_episodes(ep_dicts) total_frames = data_dict["frame_index"].shape[0] data_dict["index"] = torch.arange(0, total_frames, 1) return data_dict def to_hf_dataset(data_dict, video): features = {} if video: features["observation.image"] = VideoFrame() else: features["observation.image"] = Image() features["observation.state"] = Sequence( length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None) ) features["action"] = Sequence( length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None) ) features["episode_index"] = Value(dtype="int64", id=None) features["frame_index"] = Value(dtype="int64", id=None) features["timestamp"] = Value(dtype="float32", id=None) features["next.reward"] = Value(dtype="float32", id=None) features["next.done"] = Value(dtype="bool", id=None) features["index"] = Value(dtype="int64", id=None) # TODO(rcadene): add success # features["next.success"] = Value(dtype='bool', id=None) hf_dataset = Dataset.from_dict(data_dict, features=Features(features)) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def from_raw_to_lerobot_format( raw_dir: Path, videos_dir: Path, fps: int | None = None, video: bool = True, episodes: list[int] | None = None, encoding: dict | None = None, ): # sanity check check_format(raw_dir) if fps is None: fps = 15 data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding) hf_dataset = to_hf_dataset(data_dict, video) episode_data_index = calculate_episode_data_index(hf_dataset) info = { "codebase_version": CODEBASE_VERSION, "fps": fps, "video": video, } if video: info["encoding"] = get_default_encoding() return hf_dataset, episode_data_index, info
lerobot/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py/0
{ "file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py", "repo_id": "lerobot", "token_count": 2996 }
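The episode segmentation in `load_from_raw` above turns the flat `dones` array into `(from_idx, to_idx)` pairs, where `to_idx` is exclusive. A toy, standalone run of the same indexing logic (the `dones` values are invented for illustration):

```python
dones = [False, False, True, False, True]  # two episodes: frames 0-2 and 3-4

from_ids, to_ids = [], []
from_idx, to_idx = 0, 0
for done in dones:
    to_idx += 1
    if not done:
        continue
    from_ids.append(from_idx)
    to_ids.append(to_idx)
    from_idx = to_idx

print(list(zip(from_ids, to_ids)))  # [(0, 3), (3, 5)] -> slices data[0:3] and data[3:5]
```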
#!/usr/bin/env python # Copyright 2024 Nicklas Hansen, Xiaolong Wang, Hao Su, # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamConfig from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode @PreTrainedConfig.register_subclass("tdmpc") @dataclass class TDMPCConfig(PreTrainedConfig): """Configuration class for TDMPCPolicy. Defaults are configured for training with xarm_lift_medium_replay providing proprioceptive and single camera observations. The parameters you will most likely need to change are the ones which depend on the environment / sensors. Those are: `input_shapes`, `output_shapes`, and perhaps `max_random_shift_ratio`. Args: n_action_repeats: The number of times to repeat the action returned by the planning. (hint: Google action repeats in Q-learning or ask your favorite chatbot) horizon: Horizon for model predictive control. n_action_steps: Number of action steps to take from the plan given by model predictive control. This is an alternative to using action repeats. If this is set to more than 1, then we require `n_action_repeats == 1`, `use_mpc == True` and `n_action_steps <= horizon`. Note that this approach of using multiple steps from the plan is not in the original implementation. input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents the input data name, and the value is a list indicating the dimensions of the corresponding data. For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96], indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't include batch dimension or temporal dimension. output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents the output data name, and the value is a list indicating the dimensions of the corresponding data. For example, "action" refers to an output shape of [14], indicating 14-dimensional actions. Importantly, `output_shapes` doesn't include batch dimension or temporal dimension. input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"), and the value specifies the normalization mode to apply. The two available modes are "mean_std" which subtracts the mean and divides by the standard deviation and "min_max" which rescale in a [-1, 1] range. Note that here this defaults to None meaning inputs are not normalized. This is to match the original implementation. output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the original scale. Note that this is also used for normalizing the training targets. NOTE: Clipping to [-1, +1] is used during MPPI/CEM. Therefore, it is recommended that you stick with "min_max" normalization mode here. 
image_encoder_hidden_dim: Number of channels for the convolutional layers used for image encoding. state_encoder_hidden_dim: Hidden dimension for MLP used for state vector encoding. latent_dim: Observation's latent embedding dimension. q_ensemble_size: Number of Q function estimators to use in an ensemble for uncertainty estimation. mlp_dim: Hidden dimension of MLPs used for modelling the dynamics encoder, reward function, policy (π), Q ensemble, and V. discount: Discount factor (γ) to use for the reinforcement learning formalism. use_mpc: Whether to use model predictive control. The alternative is to just sample the policy model (π) for each step. cem_iterations: Number of iterations for the MPPI/CEM loop in MPC. max_std: Maximum standard deviation for actions sampled from the gaussian PDF in CEM. min_std: Minimum standard deviation for noise applied to actions sampled from the policy model (π). Doubles up as the minimum standard deviation for actions sampled from the gaussian PDF in CEM. n_gaussian_samples: Number of samples to draw from the gaussian distribution every CEM iteration. Must be non-zero. n_pi_samples: Number of samples to draw from the policy / world model rollout every CEM iteration. Can be zero. uncertainty_regularizer_coeff: Coefficient for the uncertainty regularization used when estimating trajectory values (this is the λ coeffiecient in eqn 4 of FOWM). n_elites: The number of elite samples to use for updating the gaussian parameters every CEM iteration. elite_weighting_temperature: The temperature to use for softmax weighting (by trajectory value) of the elites, when updating the gaussian parameters for CEM. gaussian_mean_momentum: Momentum (α) used for EMA updates of the mean parameter μ of the gaussian parameters optimized in CEM. Updates are calculated as μ⁻ ← αμ⁻ + (1-α)μ. max_random_shift_ratio: Maximum random shift (as a proportion of the image size) to apply to the image(s) (in units of pixels) for training-time augmentation. If set to 0, no such augmentation is applied. Note that the input images are assumed to be square for this augmentation. reward_coeff: Loss weighting coefficient for the reward regression loss. expectile_weight: Weighting (τ) used in expectile regression for the state value function (V). v_pred < v_target is weighted by τ and v_pred >= v_target is weighted by (1-τ). τ is expected to be in [0, 1]. Setting τ closer to 1 results in a more "optimistic" V. This is sensible to do because v_target is obtained by evaluating the learned state-action value functions (Q) with in-sample actions that may not be always optimal. value_coeff: Loss weighting coefficient for both the state-action value (Q) TD loss, and the state value (V) expectile regression loss. consistency_coeff: Loss weighting coefficient for the consistency loss. advantage_scaling: A factor by which the advantages are scaled prior to exponentiation for advantage weighted regression of the policy (π) estimator parameters. Note that the exponentiated advantages are clamped at 100.0. pi_coeff: Loss weighting coefficient for the action regression loss. temporal_decay_coeff: Exponential decay coefficient for decaying the loss coefficient for future time- steps. Hint: each loss computation involves `horizon` steps worth of actions starting from the current time step. target_model_momentum: Momentum (α) used for EMA updates of the target models. Updates are calculated as ϕ ← αϕ + (1-α)θ where ϕ are the parameters of the target model and θ are the parameters of the model being trained. 
""" # Input / output structure. n_obs_steps: int = 1 n_action_repeats: int = 2 horizon: int = 5 n_action_steps: int = 1 normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.IDENTITY, "STATE": NormalizationMode.IDENTITY, "ENV": NormalizationMode.IDENTITY, "ACTION": NormalizationMode.MIN_MAX, } ) # Architecture / modeling. # Neural networks. image_encoder_hidden_dim: int = 32 state_encoder_hidden_dim: int = 256 latent_dim: int = 50 q_ensemble_size: int = 5 mlp_dim: int = 512 # Reinforcement learning. discount: float = 0.9 # Inference. use_mpc: bool = True cem_iterations: int = 6 max_std: float = 2.0 min_std: float = 0.05 n_gaussian_samples: int = 512 n_pi_samples: int = 51 uncertainty_regularizer_coeff: float = 1.0 n_elites: int = 50 elite_weighting_temperature: float = 0.5 gaussian_mean_momentum: float = 0.1 # Training and loss computation. max_random_shift_ratio: float = 0.0476 # Loss coefficients. reward_coeff: float = 0.5 expectile_weight: float = 0.9 value_coeff: float = 0.1 consistency_coeff: float = 20.0 advantage_scaling: float = 3.0 pi_coeff: float = 0.5 temporal_decay_coeff: float = 0.5 # Target model. target_model_momentum: float = 0.995 # Training presets optimizer_lr: float = 3e-4 def __post_init__(self): super().__post_init__() """Input validation (not exhaustive).""" if self.n_gaussian_samples <= 0: raise ValueError( f"The number of guassian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`" ) if self.normalization_mapping["ACTION"] is not NormalizationMode.MIN_MAX: raise ValueError( "TD-MPC assumes the action space dimensions to all be in [-1, 1]. Therefore it is strongly " f"advised that you stick with the default. See {self.__class__.__name__} docstring for more " "information." ) if self.n_obs_steps != 1: raise ValueError( f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`" ) if self.n_action_steps > 1: if self.n_action_repeats != 1: raise ValueError( "If `n_action_steps > 1`, `n_action_repeats` must be left to its default value of 1." ) if not self.use_mpc: raise ValueError("If `n_action_steps > 1`, `use_mpc` must be set to `True`.") if self.n_action_steps > self.horizon: raise ValueError("`n_action_steps` must be less than or equal to `horizon`.") def get_optimizer_preset(self) -> AdamConfig: return AdamConfig(lr=self.optimizer_lr) def get_scheduler_preset(self) -> None: return None def validate_features(self) -> None: # There should only be one image key. if len(self.image_features) > 1: raise ValueError( f"{self.__class__.__name__} handles at most one image for now. Got image keys {self.image_features}." ) if len(self.image_features) > 0: image_ft = next(iter(self.image_features.values())) if image_ft.shape[-2] != image_ft.shape[-1]: # TODO(alexander-soare): This limitation is solely because of code in the random shift # augmentation. It should be able to be removed. raise ValueError(f"Only square images are handled now. Got image shape {image_ft.shape}.") @property def observation_delta_indices(self) -> list: return list(range(self.horizon + 1)) @property def action_delta_indices(self) -> list: return list(range(self.horizon)) @property def reward_delta_indices(self) -> None: return list(range(self.horizon))
lerobot/lerobot/common/policies/tdmpc/configuration_tdmpc.py/0
{ "file_path": "lerobot/lerobot/common/policies/tdmpc/configuration_tdmpc.py", "repo_id": "lerobot", "token_count": 4270 }
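The `expectile_weight` (τ) described in the TDMPCConfig docstring above corresponds to the asymmetric weighting used in expectile regression of the state value function V. Below is a hedged PyTorch sketch of that loss term; the real implementation lives in `modeling_tdmpc.py` and may differ in details such as reduction or clamping:

```python
import torch


def expectile_loss(v_pred: torch.Tensor, v_target: torch.Tensor, tau: float = 0.9) -> torch.Tensor:
    """Asymmetric squared error: v_pred < v_target is weighted by tau, v_pred >= v_target by (1 - tau).

    A tau close to 1 yields a more "optimistic" value function, as explained in the docstring above.
    """
    diff = v_target - v_pred
    weight = torch.where(diff > 0, torch.full_like(diff, tau), torch.full_like(diff, 1 - tau))
    return (weight * diff**2).mean()


v_pred = torch.tensor([0.5, 1.5])
v_target = torch.tensor([1.0, 1.0])
print(expectile_loss(v_pred, v_target, tau=0.9))  # tensor(0.1250): 0.9 * 0.25 and 0.1 * 0.25, averaged
```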
import abc from dataclasses import dataclass, field from typing import Sequence import draccus from lerobot.common.robot_devices.cameras.configs import ( CameraConfig, IntelRealSenseCameraConfig, OpenCVCameraConfig, ) from lerobot.common.robot_devices.motors.configs import ( DynamixelMotorsBusConfig, FeetechMotorsBusConfig, MotorsBusConfig, ) @dataclass class RobotConfig(draccus.ChoiceRegistry, abc.ABC): @property def type(self) -> str: return self.get_choice_name(self.__class__) # TODO(rcadene, aliberts): remove ManipulatorRobotConfig abstraction @dataclass class ManipulatorRobotConfig(RobotConfig): leader_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {}) follower_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {}) cameras: dict[str, CameraConfig] = field(default_factory=lambda: {}) # Optionally limit the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length # as the number of motors in your follower arms (assumes all follower arms have the same number of # motors). max_relative_target: list[float] | float | None = None # Optionally set the leader arm in torque mode with the gripper motor set to this angle. This makes it # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the # gripper is not put in torque mode. gripper_open_degree: float | None = None mock: bool = False def __post_init__(self): if self.mock: for arm in self.leader_arms.values(): if not arm.mock: arm.mock = True for arm in self.follower_arms.values(): if not arm.mock: arm.mock = True for cam in self.cameras.values(): if not cam.mock: cam.mock = True if self.max_relative_target is not None and isinstance(self.max_relative_target, Sequence): for name in self.follower_arms: if len(self.follower_arms[name].motors) != len(self.max_relative_target): raise ValueError( f"len(max_relative_target)={len(self.max_relative_target)} but the follower arm with name {name} has " f"{len(self.follower_arms[name].motors)} motors. Please make sure that the " f"`max_relative_target` list has as many parameters as there are motors per arm. " "Note: This feature does not yet work with robots where different follower arms have " "different numbers of motors." ) @RobotConfig.register_subclass("aloha") @dataclass class AlohaRobotConfig(ManipulatorRobotConfig): # Specific to Aloha, LeRobot comes with default calibration files. Assuming the motors have been # properly assembled, no manual calibration step is expected. If you need to run manual calibration, # simply update this path to ".cache/calibration/aloha" calibration_dir: str = ".cache/calibration/aloha_default" # /!\ FOR SAFETY, READ THIS /!\ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. # For Aloha, for every goal position request, motor rotations are capped at 5 degrees by default. # When you feel more confident with teleoperation or running the policy, you can extend # this safety limit and even removing it by setting it to `null`. 
# Also, everything is expected to work safely out-of-the-box, but we highly advise to # first try to teleoperate the grippers only (by commenting out the rest of the motors in this yaml), # then to gradually add more motors (by uncommenting), until you can teleoperate both arms fully max_relative_target: int | None = 5 leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "left": DynamixelMotorsBusConfig( # window_x port="/dev/ttyDXL_leader_left", motors={ # name: (index, model) "waist": [1, "xm430-w350"], "shoulder": [2, "xm430-w350"], "shoulder_shadow": [3, "xm430-w350"], "elbow": [4, "xm430-w350"], "elbow_shadow": [5, "xm430-w350"], "forearm_roll": [6, "xm430-w350"], "wrist_angle": [7, "xm430-w350"], "wrist_rotate": [8, "xl430-w250"], "gripper": [9, "xc430-w150"], }, ), "right": DynamixelMotorsBusConfig( # window_x port="/dev/ttyDXL_leader_right", motors={ # name: (index, model) "waist": [1, "xm430-w350"], "shoulder": [2, "xm430-w350"], "shoulder_shadow": [3, "xm430-w350"], "elbow": [4, "xm430-w350"], "elbow_shadow": [5, "xm430-w350"], "forearm_roll": [6, "xm430-w350"], "wrist_angle": [7, "xm430-w350"], "wrist_rotate": [8, "xl430-w250"], "gripper": [9, "xc430-w150"], }, ), } ) follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "left": DynamixelMotorsBusConfig( port="/dev/ttyDXL_follower_left", motors={ # name: (index, model) "waist": [1, "xm540-w270"], "shoulder": [2, "xm540-w270"], "shoulder_shadow": [3, "xm540-w270"], "elbow": [4, "xm540-w270"], "elbow_shadow": [5, "xm540-w270"], "forearm_roll": [6, "xm540-w270"], "wrist_angle": [7, "xm540-w270"], "wrist_rotate": [8, "xm430-w350"], "gripper": [9, "xm430-w350"], }, ), "right": DynamixelMotorsBusConfig( port="/dev/ttyDXL_follower_right", motors={ # name: (index, model) "waist": [1, "xm540-w270"], "shoulder": [2, "xm540-w270"], "shoulder_shadow": [3, "xm540-w270"], "elbow": [4, "xm540-w270"], "elbow_shadow": [5, "xm540-w270"], "forearm_roll": [6, "xm540-w270"], "wrist_angle": [7, "xm540-w270"], "wrist_rotate": [8, "xm430-w350"], "gripper": [9, "xm430-w350"], }, ), } ) # Troubleshooting: If one of your IntelRealSense cameras freeze during # data recording due to bandwidth limit, you might need to plug the camera # on another USB hub or PCIe card. cameras: dict[str, CameraConfig] = field( default_factory=lambda: { "cam_high": IntelRealSenseCameraConfig( serial_number=128422271347, fps=30, width=640, height=480, ), "cam_low": IntelRealSenseCameraConfig( serial_number=130322270656, fps=30, width=640, height=480, ), "cam_left_wrist": IntelRealSenseCameraConfig( serial_number=218622272670, fps=30, width=640, height=480, ), "cam_right_wrist": IntelRealSenseCameraConfig( serial_number=130322272300, fps=30, width=640, height=480, ), } ) mock: bool = False @RobotConfig.register_subclass("koch") @dataclass class KochRobotConfig(ManipulatorRobotConfig): calibration_dir: str = ".cache/calibration/koch" # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. 
max_relative_target: int | None = None leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem585A0085511", motors={ # name: (index, model) "shoulder_pan": [1, "xl330-m077"], "shoulder_lift": [2, "xl330-m077"], "elbow_flex": [3, "xl330-m077"], "wrist_flex": [4, "xl330-m077"], "wrist_roll": [5, "xl330-m077"], "gripper": [6, "xl330-m077"], }, ), } ) follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem585A0076891", motors={ # name: (index, model) "shoulder_pan": [1, "xl430-w250"], "shoulder_lift": [2, "xl430-w250"], "elbow_flex": [3, "xl330-m288"], "wrist_flex": [4, "xl330-m288"], "wrist_roll": [5, "xl330-m288"], "gripper": [6, "xl330-m288"], }, ), } ) cameras: dict[str, CameraConfig] = field( default_factory=lambda: { "laptop": OpenCVCameraConfig( camera_index=0, fps=30, width=640, height=480, ), "phone": OpenCVCameraConfig( camera_index=1, fps=30, width=640, height=480, ), } ) # ~ Koch specific settings ~ # Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible # to squeeze the gripper and have it spring back to an open position on its own. gripper_open_degree: float = 35.156 mock: bool = False @RobotConfig.register_subclass("koch_bimanual") @dataclass class KochBimanualRobotConfig(ManipulatorRobotConfig): calibration_dir: str = ".cache/calibration/koch_bimanual" # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. max_relative_target: int | None = None leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "left": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem585A0085511", motors={ # name: (index, model) "shoulder_pan": [1, "xl330-m077"], "shoulder_lift": [2, "xl330-m077"], "elbow_flex": [3, "xl330-m077"], "wrist_flex": [4, "xl330-m077"], "wrist_roll": [5, "xl330-m077"], "gripper": [6, "xl330-m077"], }, ), "right": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem575E0031751", motors={ # name: (index, model) "shoulder_pan": [1, "xl330-m077"], "shoulder_lift": [2, "xl330-m077"], "elbow_flex": [3, "xl330-m077"], "wrist_flex": [4, "xl330-m077"], "wrist_roll": [5, "xl330-m077"], "gripper": [6, "xl330-m077"], }, ), } ) follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "left": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem585A0076891", motors={ # name: (index, model) "shoulder_pan": [1, "xl430-w250"], "shoulder_lift": [2, "xl430-w250"], "elbow_flex": [3, "xl330-m288"], "wrist_flex": [4, "xl330-m288"], "wrist_roll": [5, "xl330-m288"], "gripper": [6, "xl330-m288"], }, ), "right": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem575E0032081", motors={ # name: (index, model) "shoulder_pan": [1, "xl430-w250"], "shoulder_lift": [2, "xl430-w250"], "elbow_flex": [3, "xl330-m288"], "wrist_flex": [4, "xl330-m288"], "wrist_roll": [5, "xl330-m288"], "gripper": [6, "xl330-m288"], }, ), } ) cameras: dict[str, CameraConfig] = field( default_factory=lambda: { "laptop": OpenCVCameraConfig( camera_index=0, fps=30, width=640, height=480, ), "phone": OpenCVCameraConfig( camera_index=1, fps=30, width=640, height=480, ), } ) # ~ Koch specific settings ~ # Sets the leader arm in torque mode with the gripper motor set to this angle. 
This makes it possible # to squeeze the gripper and have it spring back to an open position on its own. gripper_open_degree: float = 35.156 mock: bool = False @RobotConfig.register_subclass("moss") @dataclass class MossRobotConfig(ManipulatorRobotConfig): calibration_dir: str = ".cache/calibration/moss" # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. max_relative_target: int | None = None leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( port="/dev/tty.usbmodem58760431091", motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], }, ), } ) follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( port="/dev/tty.usbmodem585A0076891", motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], }, ), } ) cameras: dict[str, CameraConfig] = field( default_factory=lambda: { "laptop": OpenCVCameraConfig( camera_index=0, fps=30, width=640, height=480, ), "phone": OpenCVCameraConfig( camera_index=1, fps=30, width=640, height=480, ), } ) mock: bool = False @RobotConfig.register_subclass("so100") @dataclass class So100RobotConfig(ManipulatorRobotConfig): calibration_dir: str = ".cache/calibration/so100" # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. max_relative_target: int | None = None leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( port="/dev/tty.usbmodem58760431091", motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], }, ), } ) follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( port="/dev/tty.usbmodem585A0076891", motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], }, ), } ) cameras: dict[str, CameraConfig] = field( default_factory=lambda: { "laptop": OpenCVCameraConfig( camera_index=0, fps=30, width=640, height=480, ), "phone": OpenCVCameraConfig( camera_index=1, fps=30, width=640, height=480, ), } ) mock: bool = False @RobotConfig.register_subclass("stretch") @dataclass class StretchRobotConfig(RobotConfig): # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. 
max_relative_target: int | None = None cameras: dict[str, CameraConfig] = field( default_factory=lambda: { "navigation": OpenCVCameraConfig( camera_index="/dev/hello-nav-head-camera", fps=10, width=1280, height=720, rotation=-90, ), "head": IntelRealSenseCameraConfig( name="Intel RealSense D435I", fps=30, width=640, height=480, rotation=90, ), "wrist": IntelRealSenseCameraConfig( name="Intel RealSense D405", fps=30, width=640, height=480, ), } ) mock: bool = False
lerobot/lerobot/common/robot_devices/robots/configs.py/0
{ "file_path": "lerobot/lerobot/common/robot_devices/robots/configs.py", "repo_id": "lerobot", "token_count": 10320 }
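Every manipulator config above exposes `max_relative_target` as a safety cap on how far a goal position may move away from the present position in a single command. The actual capping is done inside the manipulator robot class; a simplified sketch of the idea is given here (units are abstract, and this is not the repository's implementation):

```python
import torch


def cap_relative_target(
    goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float
) -> torch.Tensor:
    """Clamp the goal so that |goal - present| never exceeds max_relative_target per motor."""
    diff = torch.clamp(goal_pos - present_pos, -max_relative_target, max_relative_target)
    return present_pos + diff


present = torch.tensor([0.0, 10.0, 20.0])
goal = torch.tensor([3.0, 30.0, 19.0])
print(cap_relative_target(goal, present, max_relative_target=5.0))  # tensor([ 3., 15., 19.])
```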
import datetime as dt import logging import os from dataclasses import dataclass, field from pathlib import Path from typing import Type import draccus from huggingface_hub import hf_hub_download from huggingface_hub.errors import HfHubHTTPError from lerobot.common import envs from lerobot.common.optim import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig from lerobot.common.utils.hub import HubMixin from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available from lerobot.configs import parser from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig from lerobot.configs.policies import PreTrainedConfig TRAIN_CONFIG_NAME = "train_config.json" @dataclass class OfflineConfig: steps: int = 100_000 @dataclass class OnlineConfig: """ The online training loop looks something like: ```python for i in range(steps): do_online_rollout_and_update_online_buffer() for j in range(steps_between_rollouts): batch = next(dataloader_with_offline_and_online_data) loss = policy(batch) loss.backward() optimizer.step() ``` Note that the online training loop adopts most of the options from the offline loop unless specified otherwise. """ steps: int = 0 # How many episodes to collect at once when we reach the online rollout part of the training loop. rollout_n_episodes: int = 1 # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for # the policy. Ideally you should set this to by an even divisor of rollout_n_episodes. rollout_batch_size: int = 1 # How many optimization steps (forward, backward, optimizer step) to do between running rollouts. steps_between_rollouts: int | None = None # The proportion of online samples (vs offline samples) to include in the online training batches. sampling_ratio: float = 0.5 # First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1. env_seed: int | None = None # Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is # FIFO. buffer_capacity: int | None = None # The minimum number of frames to have in the online buffer before commencing online training. # If buffer_seed_size > rollout_n_episodes, the rollout will be run multiple times until the # seed size condition is satisfied. buffer_seed_size: int = 0 # Whether to run the online rollouts asynchronously. This means we can run the online training steps in # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training # + eval + environment rendering simultaneously. do_rollout_async: bool = False def __post_init__(self): if self.steps == 0: return if self.steps_between_rollouts is None: raise ValueError( "'steps_between_rollouts' must be set to a positive integer, but it is currently None." ) if self.env_seed is None: raise ValueError("'env_seed' must be set to a positive integer, but it is currently None.") if self.buffer_capacity is None: raise ValueError("'buffer_capacity' must be set to a positive integer, but it is currently None.") @dataclass class TrainPipelineConfig(HubMixin): dataset: DatasetConfig env: envs.EnvConfig | None = None policy: PreTrainedConfig | None = None # Set `dir` to where you would like to save all of the run outputs. If you run another training session # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. 
output_dir: Path | None = None job_name: str | None = None # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure # `dir` is the directory of an existing run with at least one checkpoint in it. # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, # regardless of what's provided with the training command at the time of resumption. resume: bool = False device: str | None = None # cuda | cpu | mp # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, # automatic gradient scaling is used. use_amp: bool = False # `seed` is used for training (eg: model initialization, dataset shuffling) # AND for the evaluation environments. seed: int | None = 1000 # Number of workers for the dataloader. num_workers: int = 4 batch_size: int = 8 eval_freq: int = 20_000 log_freq: int = 200 save_checkpoint: bool = True # Checkpoint is saved every `save_freq` training iterations and after the last training step. save_freq: int = 20_000 offline: OfflineConfig = field(default_factory=OfflineConfig) online: OnlineConfig = field(default_factory=OnlineConfig) use_policy_training_preset: bool = True optimizer: OptimizerConfig | None = None scheduler: LRSchedulerConfig | None = None eval: EvalConfig = field(default_factory=EvalConfig) wandb: WandBConfig = field(default_factory=WandBConfig) def __post_init__(self): self.checkpoint_path = None def validate(self): if not self.device: logging.warning("No device specified, trying to infer device automatically") device = auto_select_torch_device() self.device = device.type # Automatically deactivate AMP if necessary if self.use_amp and not is_amp_available(self.device): logging.warning( f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP." ) self.use_amp = False # HACK: We parse again the cli args here to get the pretrained paths if there was some. policy_path = parser.get_path_arg("policy") if policy_path: # Only load the policy config cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path elif self.resume: # The entire train config is already loaded, we just need to get the checkpoint dir config_path = parser.parse_arg("config_path") if not config_path: raise ValueError("A config_path is expected when resuming a run.") policy_path = Path(config_path).parent self.policy.pretrained_path = policy_path self.checkpoint_path = policy_path.parent if not self.job_name: if self.env is None: self.job_name = f"{self.policy.type}" else: self.job_name = f"{self.env.type}_{self.policy.type}" if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir(): raise FileExistsError( f"Output directory {self.output_dir} alreay exists and resume is {self.resume}. " f"Please change your output directory so that {self.output_dir} is not overwritten." 
) elif not self.output_dir: now = dt.datetime.now() train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" self.output_dir = Path("outputs/train") / train_dir if self.online.steps > 0: if isinstance(self.dataset.repo_id, list): raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") if self.env is None: raise ValueError("An environment is required for online training") if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") elif self.use_policy_training_preset and not self.resume: self.optimizer = self.policy.get_optimizer_preset() self.scheduler = self.policy.get_scheduler_preset() @classmethod def __get_path_fields__(cls) -> list[str]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" return ["policy"] def _save_pretrained(self, save_directory: Path) -> None: with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"): draccus.dump(self, f, indent=4) @classmethod def from_pretrained( cls: Type["TrainPipelineConfig"], pretrained_name_or_path: str | Path, *, force_download: bool = False, resume_download: bool = None, proxies: dict | None = None, token: str | bool | None = None, cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, **kwargs, ) -> "TrainPipelineConfig": model_id = str(pretrained_name_or_path) config_file: str | None = None if Path(model_id).is_dir(): if TRAIN_CONFIG_NAME in os.listdir(model_id): config_file = os.path.join(model_id, TRAIN_CONFIG_NAME) else: print(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}") elif Path(model_id).is_file(): config_file = model_id else: try: config_file = hf_hub_download( repo_id=model_id, filename=TRAIN_CONFIG_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) except HfHubHTTPError as e: raise FileNotFoundError( f"{TRAIN_CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" ) from e cli_args = kwargs.pop("cli_args", []) cfg = draccus.parse(cls, config_file, args=cli_args) return cfg
lerobot/lerobot/configs/train.py/0
{ "file_path": "lerobot/lerobot/configs/train.py", "repo_id": "lerobot", "token_count": 4120 }
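The `__post_init__` checks in `OnlineConfig` above are easiest to see in action. Below is a minimal sketch, assuming the `lerobot` package containing this `train.py` is installed; the field values are illustrative only.

```python
# Illustrative only; assumes `lerobot` is installed so the classes above import.
from lerobot.configs.train import OnlineConfig

offline_only = OnlineConfig(steps=0)  # steps=0 skips all online-specific checks

try:
    OnlineConfig(steps=10)  # online steps requested without rollout settings
except ValueError as err:
    print(err)  # "'steps_between_rollouts' must be set to a positive integer ..."
```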
""" Tests for physical cameras and their mocked versions. If the physical camera is not connected to the computer, or not working, the test will be skipped. Example of running a specific test: ```bash pytest -sx tests/test_cameras.py::test_camera ``` Example of running test on a real camera connected to the computer: ```bash pytest -sx 'tests/test_cameras.py::test_camera[opencv-False]' pytest -sx 'tests/test_cameras.py::test_camera[intelrealsense-False]' ``` Example of running test on a mocked version of the camera: ```bash pytest -sx 'tests/test_cameras.py::test_camera[opencv-True]' pytest -sx 'tests/test_cameras.py::test_camera[intelrealsense-True]' ``` """ import numpy as np import pytest from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from tests.utils import TEST_CAMERA_TYPES, make_camera, require_camera # Maximum absolute difference between two consecutive images recored by a camera. # This value differs with respect to the camera. MAX_PIXEL_DIFFERENCE = 25 def compute_max_pixel_difference(first_image, second_image): return np.abs(first_image.astype(float) - second_image.astype(float)).max() @pytest.mark.parametrize("camera_type, mock", TEST_CAMERA_TYPES) @require_camera def test_camera(request, camera_type, mock): """Test assumes that `camera.read()` returns the same image when called multiple times in a row. So the environment should not change (you shouldnt be in front of the camera) and the camera should not be moving. Warning: The tests worked for a macbookpro camera, but I am getting assertion error (`np.allclose(color_image, async_color_image)`) for my iphone camera and my LG monitor camera. """ # TODO(rcadene): measure fps in nightly? # TODO(rcadene): test logs if camera_type == "opencv" and not mock: pytest.skip("TODO(rcadene): fix test for opencv physical camera") camera_kwargs = {"camera_type": camera_type, "mock": mock} # Test instantiating camera = make_camera(**camera_kwargs) # Test reading, async reading, disconnecting before connecting raises an error with pytest.raises(RobotDeviceNotConnectedError): camera.read() with pytest.raises(RobotDeviceNotConnectedError): camera.async_read() with pytest.raises(RobotDeviceNotConnectedError): camera.disconnect() # Test deleting the object without connecting first del camera # Test connecting camera = make_camera(**camera_kwargs) camera.connect() assert camera.is_connected assert camera.fps is not None assert camera.width is not None assert camera.height is not None # Test connecting twice raises an error with pytest.raises(RobotDeviceAlreadyConnectedError): camera.connect() # Test reading from the camera color_image = camera.read() assert isinstance(color_image, np.ndarray) assert color_image.ndim == 3 h, w, c = color_image.shape assert c == 3 assert w > h # Test read and async_read outputs similar images # ...warming up as the first frames can be black for _ in range(30): camera.read() color_image = camera.read() async_color_image = camera.async_read() error_msg = ( "max_pixel_difference between read() and async_read()", compute_max_pixel_difference(color_image, async_color_image), ) # TODO(rcadene): properly set `rtol` np.testing.assert_allclose( color_image, async_color_image, rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE, err_msg=error_msg ) # Test disconnecting camera.disconnect() assert camera.camera is None assert camera.thread is None # Test disconnecting with `__del__` camera = make_camera(**camera_kwargs) camera.connect() del camera # Test acquiring a bgr image camera 
= make_camera(**camera_kwargs, color_mode="bgr") camera.connect() assert camera.color_mode == "bgr" bgr_color_image = camera.read() np.testing.assert_allclose( color_image, bgr_color_image[:, :, [2, 1, 0]], rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE, err_msg=error_msg ) del camera # Test acquiring a rotated image camera = make_camera(**camera_kwargs) camera.connect() ori_color_image = camera.read() del camera for rotation in [None, 90, 180, -90]: camera = make_camera(**camera_kwargs, rotation=rotation) camera.connect() if mock: import tests.mock_cv2 as cv2 else: import cv2 if rotation is None: manual_rot_img = ori_color_image assert camera.rotation is None elif rotation == 90: manual_rot_img = np.rot90(color_image, k=1) assert camera.rotation == cv2.ROTATE_90_CLOCKWISE elif rotation == 180: manual_rot_img = np.rot90(color_image, k=2) assert camera.rotation == cv2.ROTATE_180 elif rotation == -90: manual_rot_img = np.rot90(color_image, k=3) assert camera.rotation == cv2.ROTATE_90_COUNTERCLOCKWISE rot_color_image = camera.read() np.testing.assert_allclose( rot_color_image, manual_rot_img, rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE, err_msg=error_msg ) del camera # TODO(rcadene): Add a test for a camera that doesnt support fps=60 and raises an OSError # TODO(rcadene): Add a test for a camera that supports fps=60 # Test width and height can be set camera = make_camera(**camera_kwargs, fps=30, width=1280, height=720) camera.connect() assert camera.fps == 30 assert camera.width == 1280 assert camera.height == 720 color_image = camera.read() h, w, c = color_image.shape assert h == 720 assert w == 1280 assert c == 3 del camera # Test not supported width and height raise an error camera = make_camera(**camera_kwargs, fps=30, width=0, height=0) with pytest.raises(OSError): camera.connect() del camera @pytest.mark.parametrize("camera_type, mock", TEST_CAMERA_TYPES) @require_camera def test_save_images_from_cameras(tmpdir, request, camera_type, mock): # TODO(rcadene): refactor if camera_type == "opencv": from lerobot.common.robot_devices.cameras.opencv import save_images_from_cameras elif camera_type == "intelrealsense": from lerobot.common.robot_devices.cameras.intelrealsense import save_images_from_cameras # Small `record_time_s` to speedup unit tests save_images_from_cameras(tmpdir, record_time_s=0.02, mock=mock)
lerobot/tests/test_cameras.py/0
{ "file_path": "lerobot/tests/test_cameras.py", "repo_id": "lerobot", "token_count": 2521 }
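The `MAX_PIXEL_DIFFERENCE` tolerance used by `test_camera` above can be exercised without any physical camera. A small self-contained sketch with synthetic frames standing in for two consecutive camera reads:

```python
# Synthetic stand-ins for two consecutive frames; values chosen for illustration.
import numpy as np

MAX_PIXEL_DIFFERENCE = 25

def compute_max_pixel_difference(first_image, second_image):
    return np.abs(first_image.astype(float) - second_image.astype(float)).max()

rng = np.random.default_rng(0)
frame_a = rng.integers(0, 256, size=(480, 640, 3), dtype=np.uint8)
noise = rng.integers(-5, 6, size=frame_a.shape)
frame_b = np.clip(frame_a.astype(int) + noise, 0, 255).astype(np.uint8)

print(compute_max_pixel_difference(frame_a, frame_b))  # well below MAX_PIXEL_DIFFERENCE
np.testing.assert_allclose(frame_a, frame_b, rtol=1e-5, atol=MAX_PIXEL_DIFFERENCE)
```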
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import platform from functools import wraps from pathlib import Path import pytest import torch from lerobot import available_cameras, available_motors, available_robots from lerobot.common.robot_devices.cameras.utils import Camera from lerobot.common.robot_devices.cameras.utils import make_camera as make_camera_device from lerobot.common.robot_devices.motors.utils import MotorsBus from lerobot.common.robot_devices.motors.utils import make_motors_bus as make_motors_bus_device from lerobot.common.utils.import_utils import is_package_available DEVICE = os.environ.get("LEROBOT_TEST_DEVICE", "cuda") if torch.cuda.is_available() else "cpu" TEST_ROBOT_TYPES = [] for robot_type in available_robots: TEST_ROBOT_TYPES += [(robot_type, True), (robot_type, False)] TEST_CAMERA_TYPES = [] for camera_type in available_cameras: TEST_CAMERA_TYPES += [(camera_type, True), (camera_type, False)] TEST_MOTOR_TYPES = [] for motor_type in available_motors: TEST_MOTOR_TYPES += [(motor_type, True), (motor_type, False)] # Camera indices used for connecting physical cameras OPENCV_CAMERA_INDEX = int(os.environ.get("LEROBOT_TEST_OPENCV_CAMERA_INDEX", 0)) INTELREALSENSE_SERIAL_NUMBER = int(os.environ.get("LEROBOT_TEST_INTELREALSENSE_SERIAL_NUMBER", 128422271614)) DYNAMIXEL_PORT = os.environ.get("LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081") DYNAMIXEL_MOTORS = { "shoulder_pan": [1, "xl430-w250"], "shoulder_lift": [2, "xl430-w250"], "elbow_flex": [3, "xl330-m288"], "wrist_flex": [4, "xl330-m288"], "wrist_roll": [5, "xl330-m288"], "gripper": [6, "xl330-m288"], } FEETECH_PORT = os.environ.get("LEROBOT_TEST_FEETECH_PORT", "/dev/tty.usbmodem585A0080971") FEETECH_MOTORS = { "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], } def require_x86_64_kernel(func): """ Decorator that skips the test if plateform device is not an x86_64 cpu. """ from functools import wraps @wraps(func) def wrapper(*args, **kwargs): if platform.machine() != "x86_64": pytest.skip("requires x86_64 plateform") return func(*args, **kwargs) return wrapper def require_cpu(func): """ Decorator that skips the test if device is not cpu. """ from functools import wraps @wraps(func) def wrapper(*args, **kwargs): if DEVICE != "cpu": pytest.skip("requires cpu") return func(*args, **kwargs) return wrapper def require_cuda(func): """ Decorator that skips the test if cuda is not available. """ from functools import wraps @wraps(func) def wrapper(*args, **kwargs): if not torch.cuda.is_available(): pytest.skip("requires cuda") return func(*args, **kwargs) return wrapper def require_env(func): """ Decorator that skips the test if the required environment package is not installed. As it need 'env_name' in args, it also checks whether it is provided as an argument. 
If 'env_name' is None, this check is skipped. """ @wraps(func) def wrapper(*args, **kwargs): # Determine if 'env_name' is provided and extract its value arg_names = func.__code__.co_varnames[: func.__code__.co_argcount] if "env_name" in arg_names: # Get the index of 'env_name' and retrieve the value from args index = arg_names.index("env_name") env_name = args[index] if len(args) > index else kwargs.get("env_name") else: raise ValueError("Function does not have 'env_name' as an argument.") # Perform the package check package_name = f"gym_{env_name}" if env_name is not None and not is_package_available(package_name): pytest.skip(f"gym-{env_name} not installed") return func(*args, **kwargs) return wrapper def require_package_arg(func): """ Decorator that skips the test if the required package is not installed. This is similar to `require_env` but more general in that it can check any package (not just environments). As it need 'required_packages' in args, it also checks whether it is provided as an argument. If 'required_packages' is None, this check is skipped. """ @wraps(func) def wrapper(*args, **kwargs): # Determine if 'required_packages' is provided and extract its value arg_names = func.__code__.co_varnames[: func.__code__.co_argcount] if "required_packages" in arg_names: # Get the index of 'required_packages' and retrieve the value from args index = arg_names.index("required_packages") required_packages = args[index] if len(args) > index else kwargs.get("required_packages") else: raise ValueError("Function does not have 'required_packages' as an argument.") if required_packages is None: return func(*args, **kwargs) # Perform the package check for package in required_packages: if not is_package_available(package): pytest.skip(f"{package} not installed") return func(*args, **kwargs) return wrapper def require_package(package_name): """ Decorator that skips the test if the specified package is not installed. """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if not is_package_available(package_name): pytest.skip(f"{package_name} not installed") return func(*args, **kwargs) return wrapper return decorator def require_robot(func): """ Decorator that skips the test if a robot is not available The decorated function must have two arguments `request` and `robot_type`. Example of usage: ```python @pytest.mark.parametrize( "robot_type", ["koch", "aloha"] ) @require_robot def test_require_robot(request, robot_type): pass ``` """ @wraps(func) def wrapper(*args, **kwargs): # Access the pytest request context to get the is_robot_available fixture request = kwargs.get("request") robot_type = kwargs.get("robot_type") mock = kwargs.get("mock") if robot_type is None: raise ValueError("The 'robot_type' must be an argument of the test function.") if request is None: raise ValueError("The 'request' fixture must be an argument of the test function.") if mock is None: raise ValueError("The 'mock' variable must be an argument of the test function.") # Run test with a real robot. Skip test if robot connection fails. 
if not mock and not request.getfixturevalue("is_robot_available"): pytest.skip(f"A {robot_type} robot is not available.") return func(*args, **kwargs) return wrapper def require_camera(func): @wraps(func) def wrapper(*args, **kwargs): # Access the pytest request context to get the is_camera_available fixture request = kwargs.get("request") camera_type = kwargs.get("camera_type") mock = kwargs.get("mock") if request is None: raise ValueError("The 'request' fixture must be an argument of the test function.") if camera_type is None: raise ValueError("The 'camera_type' must be an argument of the test function.") if mock is None: raise ValueError("The 'mock' variable must be an argument of the test function.") if not mock and not request.getfixturevalue("is_camera_available"): pytest.skip(f"A {camera_type} camera is not available.") return func(*args, **kwargs) return wrapper def require_motor(func): @wraps(func) def wrapper(*args, **kwargs): # Access the pytest request context to get the is_motor_available fixture request = kwargs.get("request") motor_type = kwargs.get("motor_type") mock = kwargs.get("mock") if request is None: raise ValueError("The 'request' fixture must be an argument of the test function.") if motor_type is None: raise ValueError("The 'motor_type' must be an argument of the test function.") if mock is None: raise ValueError("The 'mock' variable must be an argument of the test function.") if not mock and not request.getfixturevalue("is_motor_available"): pytest.skip(f"A {motor_type} motor is not available.") return func(*args, **kwargs) return wrapper def mock_calibration_dir(calibration_dir): # TODO(rcadene): remove this hack # calibration file produced with Moss v1, but works with Koch, Koch bimanual and SO-100 example_calib = { "homing_offset": [-1416, -845, 2130, 2872, 1950, -2211], "drive_mode": [0, 0, 1, 1, 1, 0], "start_pos": [1442, 843, 2166, 2849, 1988, 1835], "end_pos": [2440, 1869, -1106, -1848, -926, 3235], "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"], "motor_names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"], } Path(str(calibration_dir)).mkdir(parents=True, exist_ok=True) with open(calibration_dir / "main_follower.json", "w") as f: json.dump(example_calib, f) with open(calibration_dir / "main_leader.json", "w") as f: json.dump(example_calib, f) with open(calibration_dir / "left_follower.json", "w") as f: json.dump(example_calib, f) with open(calibration_dir / "left_leader.json", "w") as f: json.dump(example_calib, f) with open(calibration_dir / "right_follower.json", "w") as f: json.dump(example_calib, f) with open(calibration_dir / "right_leader.json", "w") as f: json.dump(example_calib, f) # TODO(rcadene, aliberts): remove this dark pattern that overrides def make_camera(camera_type: str, **kwargs) -> Camera: if camera_type == "opencv": camera_index = kwargs.pop("camera_index", OPENCV_CAMERA_INDEX) return make_camera_device(camera_type, camera_index=camera_index, **kwargs) elif camera_type == "intelrealsense": serial_number = kwargs.pop("serial_number", INTELREALSENSE_SERIAL_NUMBER) return make_camera_device(camera_type, serial_number=serial_number, **kwargs) else: raise ValueError(f"The camera type '{camera_type}' is not valid.") # TODO(rcadene, aliberts): remove this dark pattern that overrides def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus: if motor_type == "dynamixel": port = kwargs.pop("port", DYNAMIXEL_PORT) motors = kwargs.pop("motors", DYNAMIXEL_MOTORS) return 
make_motors_bus_device(motor_type, port=port, motors=motors, **kwargs) elif motor_type == "feetech": port = kwargs.pop("port", FEETECH_PORT) motors = kwargs.pop("motors", FEETECH_MOTORS) return make_motors_bus_device(motor_type, port=port, motors=motors, **kwargs) else: raise ValueError(f"The motor type '{motor_type}' is not valid.")
lerobot/tests/utils.py/0
{ "file_path": "lerobot/tests/utils.py", "repo_id": "lerobot", "token_count": 4768 }
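The decorators and factories in `tests/utils.py` above are meant to be combined in test modules the same way `test_cameras.py` does. A hypothetical smoke test, assuming the repo's pytest fixtures (e.g. `is_camera_available`) are available:

```python
# Hypothetical test module sketch; mirrors the usage pattern shown in test_cameras.py.
import pytest

from tests.utils import TEST_CAMERA_TYPES, make_camera, require_camera


@pytest.mark.parametrize("camera_type, mock", TEST_CAMERA_TYPES)
@require_camera
def test_make_camera_smoke(request, camera_type, mock):
    # Instantiation alone must not require a physical device to be connected.
    camera = make_camera(camera_type=camera_type, mock=mock)
    del camera
```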
.PHONY: style quality # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := src tests style: ruff format --line-length 119 --target-version py310 $(check_dirs) setup.py isort $(check_dirs) setup.py quality: ruff check --line-length 119 --target-version py310 $(check_dirs) setup.py isort --check-only $(check_dirs) setup.py flake8 --max-line-length 119 $(check_dirs) setup.py # Evaluation evaluate: $(eval PARALLEL_ARGS := $(if $(PARALLEL),$(shell \ if [ "$(PARALLEL)" = "data" ]; then \ echo "data_parallel_size=$(NUM_GPUS)"; \ elif [ "$(PARALLEL)" = "tensor" ]; then \ echo "tensor_parallel_size=$(NUM_GPUS)"; \ fi \ ),)) $(if $(filter tensor,$(PARALLEL)),export VLLM_WORKER_MULTIPROC_METHOD=spawn &&,) \ MODEL_ARGS="pretrained=$(MODEL),dtype=bfloat16,$(PARALLEL_ARGS),max_model_length=32768,gpu_memory_utilisation=0.8" && \ lighteval vllm $$MODEL_ARGS "custom|$(TASK)|0|0" \ --custom-tasks src/open_r1/evaluate.py \ --use-chat-template \ --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \ --output-dir data/evals/$(MODEL) # Example usage: # Single GPU: # make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 # Data parallel: # make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=data NUM_GPUS=8 # Tensor parallel: # make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=tensor NUM_GPUS=8
open-r1/Makefile/0
{ "file_path": "open-r1/Makefile", "repo_id": "open-r1", "token_count": 627 }
#!/bin/bash #SBATCH --job-name=deepseek-r1-generation #SBATCH --partition=hopper-prod #SBATCH --qos=normal #SBATCH --nodes=2 #SBATCH --exclusive #SBATCH --gpus-per-node=8 #SBATCH --output=./logs/%x-%j.out #SBATCH --err=./logs/%x-%j.err #SBATCH --time=04-00:00:00 # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in --hf-dataset) HF_DATASET="$2" shift 2 ;; --hf-dataset-config) HF_DATASET_CONFIG="$2" shift 2 ;; --hf-dataset-split) HF_DATASET_SPLIT="$2" shift 2 ;; --prompt-column) PROMPT_COLUMN="$2" shift 2 ;; --prompt-template) PROMPT_TEMPLATE="$2" shift 2 ;; --model) MODEL="$2" shift 2 ;; --temperature) TEMPERATURE="$2" shift 2 ;; --top-p) TOP_P="$2" shift 2 ;; --max-new-tokens) MAX_NEW_TOKENS="$2" shift 2 ;; --num-generations) NUM_GENERATIONS="$2" shift 2 ;; --input-batch-size) INPUT_BATCH_SIZE="$2" shift 2 ;; --client-replicas) CLIENT_REPLICAS="$2" shift 2 ;; --timeout) TIMEOUT="$2" shift 2 ;; --retries) RETRIES="$2" shift 2 ;; --hf-output-dataset) HF_OUTPUT_DATASET="$2" shift 2 ;; --private) PRIVATE="true" shift ;; *) echo "Unknown parameter: $1" exit 1 ;; esac done if [ -z "$MODEL" ] || [ -z "$HF_DATASET" ]; then echo "Error: --model and --hf-dataset are required parameters" exit 1 fi # Set default values for optional parameters HF_DATASET_SPLIT=${HF_DATASET_SPLIT:-"train"} PROMPT_COLUMN=${PROMPT_COLUMN:-"prompt"} PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-"{{ instruction }}"} MAX_NEW_TOKENS=${MAX_NEW_TOKENS:-8192} NUM_GENERATIONS=${NUM_GENERATIONS:-1} INPUT_BATCH_SIZE=${INPUT_BATCH_SIZE:-64} CLIENT_REPLICAS=${CLIENT_REPLICAS:-1} TIMEOUT=${TIMEOUT:-900} RETRIES=${RETRIES:-0} PRIVATE=${PRIVATE:-"false"} # Print all input arguments echo "Input arguments:" echo "MODEL: $MODEL" echo "HF_DATASET: $HF_DATASET" echo "HF_DATASET_CONFIG: $HF_DATASET_CONFIG" echo "HF_DATASET_SPLIT: $HF_DATASET_SPLIT" echo "PROMPT_COLUMN: $PROMPT_COLUMN" echo "PROMPT_TEMPLATE: $PROMPT_TEMPLATE" echo "TEMPERATURE: $TEMPERATURE" echo "TOP_P: $TOP_P" echo "MAX_NEW_TOKENS: $MAX_NEW_TOKENS" echo "NUM_GENERATIONS: $NUM_GENERATIONS" echo "INPUT_BATCH_SIZE: $INPUT_BATCH_SIZE" echo "CLIENT_REPLICAS: $CLIENT_REPLICAS" echo "TIMEOUT: $TIMEOUT" echo "RETRIES: $RETRIES" echo "HF_OUTPUT_DATASET: $HF_OUTPUT_DATASET" echo "PRIVATE: $PRIVATE" echo "-------------------" set -ex module load cuda/12.4 export LD_LIBRARY_PATH=.venv/lib/python3.11/site-packages/nvidia/nvjitlink/lib echo "SLURM_JOB_ID: $SLURM_JOB_ID" echo "SLURM_JOB_NODELIST: $SLURM_JOB_NODELIST" source openr1/bin/activate # Getting the node names nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST") nodes_array=($nodes) # Get the IP address of the head node head_node=${nodes_array[0]} head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address) # Start Ray head node port=6379 ip_head=$head_node_ip:$port export ip_head echo "IP Head: $ip_head" echo "Starting HEAD at $head_node" srun --nodes=1 --ntasks=1 -w "$head_node" \ ray start --head --node-ip-address="$head_node_ip" --port=$port \ --dashboard-host=0.0.0.0 \ --dashboard-port=8265 \ --block & # Give some time to head node to start... sleep 10 # Start Ray worker nodes worker_num=$((SLURM_JOB_NUM_NODES - 1)) # Start from 1 (0 is head node) for ((i = 1; i <= worker_num; i++)); do node_i=${nodes_array[$i]} echo "Starting WORKER $i at $node_i" srun --nodes=1 --ntasks=1 -w "$node_i" \ ray start --address "$ip_head" \ --block & sleep 5 done # Give some time to the Ray cluster to gather info echo "Waiting a bit for Ray cluster to gather node info..." 
sleep 60 # Run vllm RAY_ADDRESS="http://$head_node_ip:8265" ray job submit \ --working-dir src/open_r1 \ --no-wait \ --job-id vllm-server \ -- vllm serve $MODEL \ --tensor-parallel-size $SLURM_GPUS_PER_NODE \ --pipeline-parallel-size $SLURM_JOB_NUM_NODES \ --gpu-memory-utilization=0.85 \ --max-model-len 16384 \ --enable-chunked-prefill \ --trust-remote-code \ --distributed-executor-backend ray # wait for vllm to load the model echo "Waiting for vLLM (http://$head_node_ip:8000) server to be up..." # wait for vllm to load and serve the model while true; do if curl -s -o /dev/null -w "%{http_code}" http://$head_node_ip:8000 >/dev/null 2>&1; then echo "Received response from http://$head_node_ip:8000" break else echo "Still waiting... (Press Ctrl+C to cancel)" sleep 60 fi done echo "Checking available models..." curl http://$head_node_ip:8000/v1/models echo "Executing sanity check..." curl http://$head_node_ip:8000/v1/completions \ -H "Content-Type: application/json" \ -d "{ \"model\": \"$MODEL\", \"prompt\": \"<|begin▁of▁sentence|><|User|>hi, how are you?<|Assistant|>\", \"max_tokens\": 2048, \"temperature\": 0.6 }" # Finally submit the job to the cluster echo "Submitting job to ray cluster..." RAY_ADDRESS="http://$head_node_ip:8265" ray job submit \ --working-dir src/open_r1 \ --job-id generate \ -- python -u generate.py \ --model "$MODEL" \ --hf-dataset "$HF_DATASET" \ ${HF_DATASET_CONFIG:+--hf-dataset-config "$HF_DATASET_CONFIG"} \ --hf-dataset-split "$HF_DATASET_SPLIT" \ --prompt-column "$PROMPT_COLUMN" \ --prompt-template "$PROMPT_TEMPLATE" \ ${TEMPERATURE:+--temperature "$TEMPERATURE"} \ ${TOP_P:+--top-p "$TOP_P"} \ --max-new-tokens "$MAX_NEW_TOKENS" \ --num-generations "$NUM_GENERATIONS" \ --input-batch-size "$INPUT_BATCH_SIZE" \ --client-replicas "$CLIENT_REPLICAS" \ --timeout "$TIMEOUT" \ --retries "$RETRIES" \ ${HF_OUTPUT_DATASET:+--hf-output-dataset "$HF_OUTPUT_DATASET"} \ ${PRIVATE:+--private} \ --vllm-server-url "http://$head_node_ip:8000/v1" mkdir -p ray_logs echo "Downloading Ray job logs..." RAY_ADDRESS="http://$head_node_ip:8265" ray job logs --job-id vllm-server > ray_logs/vllm-server-${SLURM_JOB_ID}.log RAY_ADDRESS="http://$head_node_ip:8265" ray job logs --job-id generate > ray_logs/generate-${SLURM_JOB_ID}.log
open-r1/slurm/generate.slurm/0
{ "file_path": "open-r1/slurm/generate.slurm", "repo_id": "open-r1", "token_count": 3431 }
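The shell loop in `generate.slurm` that waits for the vLLM server can be expressed more compactly in Python. A rough, illustrative equivalent, assuming the `requests` package; the URL below is an example value, not one produced by the script:

```python
# Rough Python equivalent of the "wait for vLLM" shell loop above (assumes `requests`).
import time

import requests


def wait_for_vllm(base_url: str, poll_seconds: int = 60, timeout_seconds: int = 3600) -> None:
    deadline = time.monotonic() + timeout_seconds
    while time.monotonic() < deadline:
        try:
            requests.get(f"{base_url}/v1/models", timeout=5)
            print(f"Received response from {base_url}")
            return
        except requests.RequestException:
            print("Still waiting...")
            time.sleep(poll_seconds)
    raise TimeoutError(f"vLLM server at {base_url} did not come up in time")


# wait_for_vllm("http://10.0.0.1:8000")  # example head-node address
```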
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# PEFT configurations and models

The sheer size of today's large pretrained models - which commonly have billions of parameters - presents a significant training challenge because they require more storage space and more computational power to crunch all those calculations. You'll need access to powerful GPUs or TPUs to train these large pretrained models, which is expensive, not widely accessible to everyone, not environmentally friendly, and not very practical. PEFT methods address many of these challenges. There are several types of PEFT methods (soft prompting, matrix decomposition, adapters), but they all focus on the same thing: reducing the number of trainable parameters. This makes it more accessible to train and store large models on consumer hardware.

The PEFT library is designed to help you quickly train large models on free or low-cost GPUs, and in this tutorial, you'll learn how to set up a configuration to apply a PEFT method to a pretrained base model for training. Once the PEFT configuration is set up, you can use any training framework you like (Transformers' [`~transformers.Trainer`] class, [Accelerate](https://hf.co/docs/accelerate), a custom PyTorch training loop).

## PEFT configurations

<Tip>

Learn more about the parameters you can configure for each PEFT method in their respective API reference page.

</Tip>

A configuration stores important parameters that specify how a particular PEFT method should be applied. For example, take a look at the following [`LoraConfig`](https://huggingface.co/ybelkada/opt-350m-lora/blob/main/adapter_config.json) for applying LoRA and [`PromptEncoderConfig`](https://huggingface.co/smangrul/roberta-large-peft-p-tuning/blob/main/adapter_config.json) for applying p-tuning (these configuration files are already JSON-serialized). Whenever you load a PEFT adapter, it is a good idea to check whether it has an associated adapter_config.json file, which is required.

<hfoptions id="config">
<hfoption id="LoraConfig">

```json
{
  "base_model_name_or_path": "facebook/opt-350m", #base model to apply LoRA to
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA", #PEFT method type
  "r": 16,
  "revision": null,
  "target_modules": [
    "q_proj", #model modules to apply LoRA to (query and value projection layers)
    "v_proj"
  ],
  "task_type": "CAUSAL_LM" #type of task to train model on
}
```

You can create your own configuration for training by initializing a [`LoraConfig`].
```py from peft import LoraConfig, TaskType lora_config = LoraConfig( r=16, target_modules=["q_proj", "v_proj"], task_type=TaskType.CAUSAL_LM, lora_alpha=32, lora_dropout=0.05 ) ``` </hfoption> <hfoption id="PromptEncoderConfig"> ```json { "base_model_name_or_path": "roberta-large", #base model to apply p-tuning to "encoder_dropout": 0.0, "encoder_hidden_size": 128, "encoder_num_layers": 2, "encoder_reparameterization_type": "MLP", "inference_mode": true, "num_attention_heads": 16, "num_layers": 24, "num_transformer_submodules": 1, "num_virtual_tokens": 20, "peft_type": "P_TUNING", #PEFT method type "task_type": "SEQ_CLS", #type of task to train model on "token_dim": 1024 } ``` You can create your own configuration for training by initializing a [`PromptEncoderConfig`]. ```py from peft import PromptEncoderConfig, TaskType p_tuning_config = PromptEncoderConfig( encoder_reparameterization_type="MLP", encoder_hidden_size=128, num_attention_heads=16, num_layers=24, num_transformer_submodules=1, num_virtual_tokens=20, token_dim=1024, task_type=TaskType.SEQ_CLS ) ``` </hfoption> </hfoptions> ## PEFT models With a PEFT configuration in hand, you can now apply it to any pretrained model to create a [`PeftModel`]. Choose from any of the state-of-the-art models from the [Transformers](https://hf.co/docs/transformers) library, a custom model, and even new and unsupported transformer architectures. For this tutorial, load a base [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model to finetune. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") ``` Use the [`get_peft_model`] function to create a [`PeftModel`] from the base facebook/opt-350m model and the `lora_config` you created earlier. ```py from peft import get_peft_model lora_model = get_peft_model(model, lora_config) lora_model.print_trainable_parameters() "trainable params: 1,572,864 || all params: 332,769,280 || trainable%: 0.472659014678278" ``` > [!WARNING] > When calling [`get_peft_model`], the base model will be modified *in-place*. That means, when calling [`get_peft_model`] on a model that was already modified in the same way before, this model will be further mutated. Therefore, if you would like to modify your PEFT configuration after having called [`get_peft_model()`] before, you would first have to unload the model with [`~LoraModel.unload`] and then call [`get_peft_model()`] with your new configuration. Alternatively, you can re-initialize the model to ensure a fresh, unmodified state before applying a new PEFT configuration. Now you can train the [`PeftModel`] with your preferred training framework! After training, you can save your model locally with [`~PeftModel.save_pretrained`] or upload it to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method. ```py # save locally lora_model.save_pretrained("your-name/opt-350m-lora") # push to Hub lora_model.push_to_hub("your-name/opt-350m-lora") ``` To load a [`PeftModel`] for inference, you'll need to provide the [`PeftConfig`] used to create it and the base model it was trained from. ```py from peft import PeftModel, PeftConfig config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora") model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path) lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora") ``` <Tip> By default, the [`PeftModel`] is set for inference, but if you'd like to train the adapter some more you can set `is_trainable=True`. 
```py
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora", is_trainable=True)
```

</Tip>

The [`PeftModel.from_pretrained`] method is the most flexible way to load a [`PeftModel`] because it doesn't matter what model framework was used (Transformers, timm, a generic PyTorch model). Other classes, like [`AutoPeftModel`], are just convenient wrappers around the base [`PeftModel`], and make it easier to load PEFT models directly from the Hub or locally where the PEFT weights are stored.

```py
from peft import AutoPeftModelForCausalLM

lora_model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```

Take a look at the [AutoPeftModel](package_reference/auto_class) API reference to learn more about the [`AutoPeftModel`] classes.

## Next steps

With the appropriate [`PeftConfig`], you can apply it to any pretrained model to create a [`PeftModel`] and train large, powerful models faster on freely available GPUs! To learn more about PEFT configurations and models, the following guide may be helpful:

* Learn how to configure a PEFT method for models that aren't from Transformers in the [Working with custom models](../developer_guides/custom_models) guide.
peft/docs/source/tutorial/peft_model_config.md/0
{ "file_path": "peft/docs/source/tutorial/peft_model_config.md", "repo_id": "peft", "token_count": 2576 }
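The guide above stops after loading the adapter; a quick generation call is a natural sanity check. This sketch is not part of the original guide and simply continues from the `lora_model` loaded there:

```python
# Sketch only; continues from the `lora_model` loaded in the guide above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = lora_model.generate(**inputs, max_new_tokens=10)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```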
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch from diffusers.models import UNet2DConditionModel from diffusers.utils import BaseOutput, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet2DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet2DConditionNewModel(UNet2DConditionModel): def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, guided_hint: Optional[torch.Tensor] = None, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DConditionOutput, Tuple]: r""" Args: sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states encoder_attention_mask (`torch.Tensor`): (batch, sequence_length) cross-attention mask, applied to encoder_hidden_states. True = keep, False = discard. Mask will be converted into a bias, which adds large negative values to attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). added_cond_kwargs (`dict`, *optional*): A kwargs dictionary that if specified includes additonal conditions that can be used for additonal time embeddings or encoder hidden states projections. See the configurations `encoder_hid_dim_type` and `addition_embed_type` for more information. Returns: [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). 
# However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. 
class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) emb = emb + aug_emb elif self.config.addition_embed_type == "text_image": # Kadinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) emb = emb + aug_emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": # Kadinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) # 2. pre-process and insert conditioning (ControlNet) # Note: the added "guided_hint" is the only difference between this implementation and the original UNet2DConditionModel sample = self.conv_in(sample) sample = guided_hint + sample if guided_hint is not None else sample # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples if down_block_additional_residuals is not None: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) if mid_block_additional_residual is not None: sample = sample + mid_block_additional_residual # 5. 
up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample)
peft/examples/boft_controlnet/utils/unet_2d_condition.py/0
{ "file_path": "peft/examples/boft_controlnet/utils/unet_2d_condition.py", "repo_id": "peft", "token_count": 5908 }
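The comments in `forward()` above describe converting a boolean attention mask into an additive bias. A toy illustration of exactly that conversion:

```python
# Toy illustration of the attention-mask-to-bias conversion done in forward() above.
import torch

attention_mask = torch.tensor([[1, 1, 0]])                # 1 = keep, 0 = discard
bias = (1 - attention_mask.to(torch.float32)) * -10000.0  # keep -> 0.0, discard -> -10000.0
bias = bias.unsqueeze(1)                                   # [batch, 1, key_tokens], broadcastable over heads/queries
print(bias.shape, bias)
```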
<jupyter_start><jupyter_code>from transformers import AutoModelForCausalLM from peft import PeftModel, PeftConfig import torch from datasets import load_dataset import os from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset device = "cuda" model_name_or_path = "bigscience/bloomz-7b1" tokenizer_name_or_path = "bigscience/bloomz-7b1" dataset_name = "twitter_complaints" text_column = "Tweet text" label_column = "text_label" max_length = 64 lr = 1e-3 num_epochs = 50 batch_size = 8 from datasets import load_dataset dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] print(classes) dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) print(dataset) dataset["train"][0] # data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) def preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] # print(i, sample_input_ids, label_input_ids) model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) def test_preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] model_inputs = tokenizer(inputs) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) 
) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) return model_inputs processed_datasets = dataset.map( test_preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) eval_dataset = processed_datasets["train"] test_dataset = processed_datasets["test"] eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) print(next(iter(eval_dataloader))) print(next(iter(test_dataloader)))<jupyter_output><empty_output><jupyter_text>You can load model from hub or local- Load model from Hugging Face Hub, you can change to your own model id```pythonpeft_model_id = "username/twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM"```- Or load model form local```pythonpeft_model_id = "twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM"```<jupyter_code>from peft import PeftModel, PeftConfig max_memory = {0: "1GIB", 1: "1GIB", 2: "2GIB", 3: "10GIB", "cpu": "30GB"} peft_model_id = "smangrul/twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, device_map="auto", max_memory=max_memory) model = PeftModel.from_pretrained(model, peft_model_id, device_map="auto", max_memory=max_memory) # model model.hf_device_map model.eval() i = 89 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)) model.eval() eval_preds = [] for _, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = model.generate(**batch, max_new_tokens=10) preds = outputs[:, max_length:].detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["train"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=}") print(f"{eval_preds[:10]=}") print(f"{dataset['train'][label_column][:10]=}") model.eval() test_preds = [] for _, batch in enumerate(tqdm(test_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = model.generate(**batch, max_new_tokens=10) preds = outputs[:, max_length:].detach().cpu().numpy() test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) if len(test_preds) > 100: break test_preds<jupyter_output><empty_output>
peft/examples/causal_language_modeling/peft_lora_clm_accelerate_big_model_inference.ipynb/0
{ "file_path": "peft/examples/causal_language_modeling/peft_lora_clm_accelerate_big_model_inference.ipynb", "repo_id": "peft", "token_count": 2945 }
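The left-padding performed in `preprocess_function`/`test_preprocess_function` above is easier to follow on a toy sample. The token ids below are made up and `pad_id` stands in for `tokenizer.pad_token_id`:

```python
# Toy version of the left-padding applied in the notebook's preprocessing above.
import torch

pad_id, max_length = 3, 8
sample_input_ids = [5, 6, 7, 9, 2]
input_ids = [pad_id] * (max_length - len(sample_input_ids)) + sample_input_ids
attention_mask = [0] * (max_length - len(sample_input_ids)) + [1] * len(sample_input_ids)

print(torch.tensor(input_ids))       # tensor([3, 3, 3, 5, 6, 7, 9, 2])
print(torch.tensor(attention_mask))  # tensor([0, 0, 0, 1, 1, 1, 1, 1])
```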
<jupyter_start><jupyter_code>import os import torch from transformers import ( AutoTokenizer, default_data_collator, AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer, GenerationConfig, ) from peft import get_peft_model, PromptTuningInit, PromptTuningConfig, TaskType from datasets import load_dataset os.environ["CUDA_VISIBLE_DEVICES"] = "0" os.environ["TOKENIZERS_PARALLELISM"] = "false" device = "cuda" model_name_or_path = "t5-large" tokenizer_name_or_path = "t5-large" checkpoint_name = "financial_sentiment_analysis_prefix_tuning_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 8 lr = 1e0 num_epochs = 5 batch_size = 8 # creating model peft_config = peft_config = PromptTuningConfig( task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=PromptTuningInit.TEXT, num_virtual_tokens=20, prompt_tuning_init_text="What is the sentiment of this article?\n", inference_mode=False, tokenizer_name_or_path=model_name_or_path, ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model # loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) dataset["train"][0] # data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"].shuffle() eval_dataset = processed_datasets["validation"] # training and evaluation def compute_metrics(eval_preds): preds, labels = eval_preds preds = tokenizer.batch_decode(preds, skip_special_tokens=True) labels = tokenizer.batch_decode(labels, skip_special_tokens=True) correct = 0 total = 0 for pred, true in zip(preds, labels): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total return {"accuracy": accuracy} training_args = Seq2SeqTrainingArguments( "out", per_device_train_batch_size=batch_size, learning_rate=lr, num_train_epochs=num_epochs, eval_strategy="epoch", logging_strategy="epoch", save_strategy="no", report_to=[], predict_with_generate=True, generation_config=GenerationConfig(max_length=max_length), ) trainer = Seq2SeqTrainer( model=model, tokenizer=tokenizer, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=default_data_collator, compute_metrics=compute_metrics, ) trainer.train() # saving model peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id) ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckpt from peft import PeftModel, PeftConfig peft_model_id = 
f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id) model.eval() i = 107 inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt") print(dataset["validation"][text_column][i]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>Aspocomp Group , headquartered in Helsinki , Finland , develops interconnection solutions for the electronics industry . {'input_ids': tensor([[ 71, 7990, 7699, 1531, 3, 6, 3, 27630, 16, 29763, 3, 6, 16458, 3, 6, 1344, 7, 1413, 28102, 1275, 21, 8, 12800, 681, 3, 5, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])} tensor([[ 0, 7163, 1]]) ['neutral']
peft/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb/0
{ "file_path": "peft/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb", "repo_id": "peft", "token_count": 2021 }
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from transformers import AutoTokenizer class TokenizerMetaMath: PROMPT_NO_INPUT = ( "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n" "### Instruction:\n{query}\n\n### Response: " ) PROMPT = ( "Below is an instruction that describes a task, paired with an input that provides further context. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{query}\n\n### Input:\n{input}\n\n### Response: " ) def format_prompt(self, query): query = query.split("\n", 1) if len(query) == 1 or query[1].strip("\n") == "": return self.PROMPT_NO_INPUT.format(query=query[0]) else: return self.PROMPT.format(query=query[0], input=query[1]) def __init__(self, tokenizer_path): self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) def __call__(self, examples): prompts = [self.format_prompt(text) for text in examples["query"]] completions = examples["response"] return self._tokenize_fn(prompts, completions) def _tokenize_fn(self, prompts, completions): prompt_tokens = self.tokenizer(prompts, add_special_tokens=False)["input_ids"] input_tokens = self.tokenizer([x + y for x, y in zip(prompts, completions)], add_special_tokens=False)[ "input_ids" ] input_tokens = [[self.tokenizer.bos_token_id] + x + [self.tokenizer.eos_token_id] for x in input_tokens] prompt_length = [len(x) + 1 for x in prompt_tokens] # +1 for the bos token input_length = [len(x) for x in input_tokens] return {"input_ids": input_tokens, "prompt_length": prompt_length, "input_length": input_length} class DataCollator: def __init__(self, eos_token_id, max_length=None): self.eos_token_id = eos_token_id self.max_length = max_length def __call__(self, batch): batch = {k: [item[k] for item in batch] for k in batch[0]} input_lengths = torch.stack(batch["input_length"]) prompt_lengths = torch.stack(batch["prompt_length"]) input_ids = torch.nn.utils.rnn.pad_sequence( batch["input_ids"], batch_first=True, padding_value=self.eos_token_id ) col_indices = torch.arange(input_ids.size(1)).unsqueeze(0) attention_mask = col_indices < input_lengths.unsqueeze(1) label_mask = torch.logical_or(col_indices < prompt_lengths.unsqueeze(1), ~attention_mask) labels = input_ids.masked_fill(label_mask, -100) if self.max_length is not None: input_ids = input_ids[:, : self.max_length] attention_mask = attention_mask[:, : self.max_length] labels = labels[:, : self.max_length] return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}
peft/examples/eva_finetuning/utils.py/0
{ "file_path": "peft/examples/eva_finetuning/utils.py", "repo_id": "peft", "token_count": 1382 }
<jupyter_start><jupyter_code>import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional import psutil import json import torch import torch.nn.functional as F import torch.utils.checkpoint from torch.utils.data import Dataset import datasets import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers import DDPMScheduler, PNDMScheduler, StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig, CLIPFeatureExtractor from peft import PeftModel, LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) MODEL_NAME = "CompVis/stable-diffusion-v1-4" # "stabilityai/stable-diffusion-2-1-base" INSTANCE_PROMPT = "a photo of sks dog" base_path = "/home/sourab/temp/" def get_lora_sd_pipeline( ckpt_dir, base_model_name_or_path=None, dtype=torch.float16, device="cuda", adapter_name="default" ): unet_sub_dir = os.path.join(ckpt_dir, "unet") text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder") if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None: config = LoraConfig.from_pretrained(text_encoder_sub_dir) base_model_name_or_path = config.base_model_name_or_path if base_model_name_or_path is None: raise ValueError("Please specify the base model name or path") pipe = StableDiffusionPipeline.from_pretrained( base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False ).to(device) pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) if os.path.exists(text_encoder_sub_dir): pipe.text_encoder = PeftModel.from_pretrained( pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name ) if dtype in (torch.float16, torch.bfloat16): pipe.unet.half() pipe.text_encoder.half() pipe.to(device) return pipe def load_adapter(pipe, ckpt_dir, adapter_name): unet_sub_dir = os.path.join(ckpt_dir, "unet") text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder") pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name) if os.path.exists(text_encoder_sub_dir): pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name) def set_adapter(pipe, adapter_name): pipe.unet.set_adapter(adapter_name) if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.set_adapter(adapter_name) def merging_lora_with_base(pipe, ckpt_dir, adapter_name="default"): unet_sub_dir = os.path.join(ckpt_dir, "unet") text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder") if isinstance(pipe.unet, PeftModel): pipe.unet.set_adapter(adapter_name) else: pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) pipe.unet = pipe.unet.merge_and_unload() if os.path.exists(text_encoder_sub_dir): if isinstance(pipe.text_encoder, PeftModel): 
pipe.text_encoder.set_adapter(adapter_name) else: pipe.text_encoder = PeftModel.from_pretrained( pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name ) pipe.text_encoder = pipe.text_encoder.merge_and_unload() return pipe def create_weighted_lora_adapter(pipe, adapters, weights, adapter_name="default"): pipe.unet.add_weighted_adapter(adapters, weights, adapter_name) if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.add_weighted_adapter(adapters, weights, adapter_name) return pipe %%time pipe = get_lora_sd_pipeline(os.path.join(base_path, "dog_dreambooth_updated"), adapter_name="dog") %%time load_adapter(pipe, os.path.join(base_path, "toy_dreambooth"), adapter_name="toy") pipe = create_weighted_lora_adapter(pipe, ["toy", "dog"], [1.0, 1.05], adapter_name="toy_dog") %%time set_adapter(pipe, adapter_name="dog") prompt = "sks dog playing fetch in the park" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image %%time set_adapter(pipe, adapter_name="toy") prompt = "narendra modi rendered in the style of <1>" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image set_adapter(pipe, adapter_name="dog") prompt = "sks dog in a big red bucket" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image set_adapter(pipe, adapter_name="toy") prompt = "superman rendered in the style of <1>, close up portrait" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image set_adapter(pipe, adapter_name="toy_dog") prompt = "sks dog rendered in the style of <1>, close up portrait, 4K HD" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image<jupyter_output><empty_output>
peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 2282 }
<jupyter_start><jupyter_text>Named Entity Recognition with Peft Model 🤗 In this notebook, we will learn how to perform Named Entity Recognition(NER) on the CoNLL-2003 dataset using the Trainer class This notebook has been adapted from the main NLP course here - https://huggingface.co/learn/nlp-course/chapter7/2?fw=ptfine-tuning-the-model<jupyter_code>#install the required libraries !pip install -q datasets evaluate transformers seqeval # Import required libraries from datasets import load_dataset from transformers import AutoTokenizer, AutoModelForTokenClassification, DataCollatorForTokenClassification, TrainingArguments, Trainer, pipeline from peft import get_peft_model, LoraConfig, TaskType import evaluate import numpy as np from huggingface_hub import notebook_login raw_datasets = load_dataset("conll2003") print(raw_datasets) # Look at the tokens of the first training example raw_datasets["train"][0]["tokens"] # Look at the NER tags of the first training example raw_datasets["train"][0]["ner_tags"] # Get the label names for the NER tags ner_feature = raw_datasets["train"].features["ner_tags"] label_names = ner_feature.feature.names label_names words = raw_datasets["train"][0]["tokens"] labels = raw_datasets["train"][0]["ner_tags"] line1 = "" line2 = "" for word, label in zip(words, labels): full_label = label_names[label] max_length = max(len(word), len(full_label)) line1 += word + " " * (max_length - len(word) + 1) line2 += full_label + " " * (max_length - len(full_label) + 1) print(line1) print(line2) # Load the tokenizer model_checkpoint = "bert-base-cased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) # Tokenize the first training example inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True) inputs.tokens() def align_labels_with_tokens(labels, word_ids): new_labels = [] current_word = None for word_id in word_ids: if word_id != current_word: # Start of a new word! 
current_word = word_id label = -100 if word_id is None else labels[word_id] new_labels.append(label) elif word_id is None: # Special token new_labels.append(-100) else: # Same word as previous token label = labels[word_id] # If the label is B-XXX we change it to I-XXX if label % 2 == 1: label += 1 new_labels.append(label) return new_labels labels = raw_datasets["train"][0]["ner_tags"] word_ids = inputs.word_ids() print(labels) print(align_labels_with_tokens(labels, word_ids)) def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples["tokens"], truncation=True, is_split_into_words=True ) all_labels = examples["ner_tags"] new_labels = [] for i, labels in enumerate(all_labels): word_ids = tokenized_inputs.word_ids(i) new_labels.append(align_labels_with_tokens(labels, word_ids)) tokenized_inputs["labels"] = new_labels return tokenized_inputs tokenized_datasets = raw_datasets.map( tokenize_and_align_labels, batched=True, remove_columns=raw_datasets["train"].column_names, ) data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer) for i in range(2): print(tokenized_datasets["train"][i]["labels"]) metric = evaluate.load("seqeval") # Create label mappings id2label = {i: label for i, label in enumerate(label_names)} label2id = {v: k for k, v in id2label.items()} # Load the pre-trained model model = AutoModelForTokenClassification.from_pretrained( model_checkpoint, id2label=id2label, label2id=label2id, ) model.config.num_labels model # Configure LoRA (Low-Rank Adaptation) for fine-tuning peft_config = LoraConfig(target_modules = ["query", "key"], task_type = TaskType.TOKEN_CLS) model = get_peft_model(model, peft_config) model.print_trainable_parameters() def compute_metrics(eval_preds): logits, labels = eval_preds predictions = np.argmax(logits, axis=-1) # Remove ignored index (special tokens) and convert to labels true_labels = [[label_names[l] for l in label if l != -100] for label in labels] true_predictions = [ [label_names[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] all_metrics = metric.compute(predictions=true_predictions, references=true_labels) return { "precision": all_metrics["overall_precision"], "recall": all_metrics["overall_recall"], "f1": all_metrics["overall_f1"], "accuracy": all_metrics["overall_accuracy"], } notebook_login() args = TrainingArguments( "bert-finetuned-ner-lora", evaluation_strategy="epoch", per_device_train_batch_size=32, # decrease this for OOM error per_device_eval_batch_size=64, save_strategy="epoch", learning_rate=2e-3, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, do_eval=True, do_predict=True, metric_for_best_model="accuracy", label_names=["labels"], push_to_hub=True, ) trainer = Trainer( model=model, args=args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics ) trainer.train() # Replace this with your own checkpoint model_checkpoint = "bert-finetuned-ner-lora" token_classifier = pipeline( "token-classification", model = model_checkpoint, aggregation_strategy = "simple" ) token_classifier("My name is Jino.")<jupyter_output>Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.
peft/examples/token_classification/peft_lora_ner.ipynb/0
{ "file_path": "peft/examples/token_classification/peft_lora_ner.ipynb", "repo_id": "peft", "token_count": 2233 }
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import importlib.metadata as importlib_metadata import platform from functools import lru_cache import packaging.version import torch @lru_cache def is_bnb_available() -> bool: return importlib.util.find_spec("bitsandbytes") is not None @lru_cache def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") @lru_cache def is_auto_gptq_available(): if importlib.util.find_spec("auto_gptq") is not None: AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: return True else: raise ImportError( f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" ) @lru_cache def is_gptqmodel_available(): if importlib.util.find_spec("gptqmodel") is not None: GPTQMODEL_MINIMUM_VERSION = packaging.version.parse("1.7.0") OPTIMUM_MINIMUM_VERSION = packaging.version.parse("1.23.99") version_gptqmodel = packaging.version.parse(importlib_metadata.version("gptqmodel")) if GPTQMODEL_MINIMUM_VERSION <= version_gptqmodel: if is_optimum_available(): version_optimum = packaging.version.parse(importlib_metadata.version("optimum")) if OPTIMUM_MINIMUM_VERSION <= version_optimum: return True else: raise ImportError( f"gptqmodel requires optimum version {OPTIMUM_MINIMUM_VERSION} or higher. Found version {version_optimum}, " f"but only versions above {OPTIMUM_MINIMUM_VERSION} are supported" ) else: raise ImportError( f"gptqmodel requires optimum version {OPTIMUM_MINIMUM_VERSION} or higher to be installed." ) else: raise ImportError( f"Found an incompatible version of gptqmodel. 
Found version {version_gptqmodel}, " f"but only versions above {GPTQMODEL_MINIMUM_VERSION} are supported" ) @lru_cache def is_optimum_available() -> bool: return importlib.util.find_spec("optimum") is not None @lru_cache def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False @lru_cache def is_aqlm_available(): return importlib.util.find_spec("aqlm") is not None @lru_cache def is_auto_awq_available(): return importlib.util.find_spec("awq") is not None @lru_cache def is_eetq_available(): return importlib.util.find_spec("eetq") is not None @lru_cache def is_hqq_available(): return importlib.util.find_spec("hqq") is not None @lru_cache def is_torchao_available(): if importlib.util.find_spec("torchao") is None: return False TORCHAO_MINIMUM_VERSION = packaging.version.parse("0.4.0") try: torchao_version = packaging.version.parse(importlib_metadata.version("torchao")) except importlib_metadata.PackageNotFoundError: # Same idea as in diffusers: # https://github.com/huggingface/diffusers/blob/9f06a0d1a4a998ac6a463c5be728c892f95320a8/src/diffusers/utils/import_utils.py#L351-L357 # It's not clear under what circumstances `importlib_metadata.version("torchao")` can raise an error even # though `importlib.util.find_spec("torchao") is not None` but it has been observed, so adding this for # precaution. return False if torchao_version < TORCHAO_MINIMUM_VERSION: raise ImportError( f"Found an incompatible version of torchao. Found version {torchao_version}, " f"but only versions above {TORCHAO_MINIMUM_VERSION} are supported" ) return True @lru_cache def is_xpu_available(check_device=False): """ Checks if XPU acceleration is available and potentially if a XPU is in the environment """ system = platform.system() if system == "Darwin": return False else: if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available()
peft/src/peft/import_utils.py/0
{ "file_path": "peft/src/peft/import_utils.py", "repo_id": "peft", "token_count": 2355 }
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from dataclasses import dataclass, field from typing import Literal, Optional, Union from torch import nn from peft.config import PeftConfig from peft.utils import PeftType @dataclass class LoraRuntimeConfig: """ This is the sub-configuration class to store the runtime configurations for the model. Args: ephemeral_gpu_offload (`bool`): Whether to use ephemeral GPU offloading for models partially kept in CPU memory. """ ephemeral_gpu_offload: bool = field( default=False, metadata={ "help": ( "Whether to use ephemeral GPU offloading for models partially kept in CPU memory. Ephemeral GPU offloading result in " "the data involved in intense operations being momentarily copied over to the GPU, and the results copied " "back to CPU. There is a momentary VRAM overhead, but operations are generally orders of magnitude faster " "compared to performing them on the CPU. This is useful when parts of the model and/or components (such " "as adapters) are kept in CPU memory until they are needed. Rather than perform expensive operations on " "small data, the data is transferred to the GPU on-demand, the operation(s) performed, and the results " "moved back to CPU memory. Currently only affects DoRA initialization." ) }, ) @dataclass class LoftQConfig: """ This is the sub-configuration class to store the configuration of a [`LoraModel`]. Args: bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}. bits (`int`): Quantization bits for LoftQ. iter (`int`): Alternating iterations for LoftQ. fake (`bool`): True: use fp16/fp32; used for first time to save weights. False: use bitsandbytes 4bit linear models. weights can't be saved. Recommend to set to True, save the weights and load the saved weights in 4 bits. """ loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"}) loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"}) @dataclass class EvaConfig: """ This is the sub-configuration class to store the configuration for a data-driven initialization via EVA. EVA was introduced in <a href='https://arxiv.org/abs/2410.07170'>Explained Variance Adaptation</a>. Args: rho (`float`): Rho value for EVA redistribution (>= 1.0). The maximum rank for a layer is lora_r * rho. Default is 2.0, meaning the maximum rank allowed for a layer is 2r. Increasing rho will allow for a higher degree of redistribution of ranks across layers. Some pre-trained models might be more sensitive to a rank redistribution. It can therefore be beneficial to try rho=1.0 (no redistribution) if the performance is lower than expected. tau (`float`): Cosine similarity threshold for early stopping. Compares the cosine similarity of right-singular vectors between two consecutive SVD steps. 
If the cosine similarity is above this threshold, the SVD iteration is stopped. Default is 0.99. use_label_mask (`bool`): Use label mask for EVA initialization. This means that positions where labels=label_mask_value are ignored for the SVD computation. Setting use_label_mask=True is preferred in most cases and can be especially beneficial for multi-turn conversations. The default value is True. Filtering out items based on the label mask can sometimes lead to a small batch size and as a result instabilities in the SVD computation. For cases where a large share of batch items would be filtered out, set use_label_mask=False. label_mask_value (`int`): If use_label_mask=True the value to look for to mask out ignored tokens. Default is -100. whiten (`bool`): Apply whitening to singular vectors. Default is False. Whitening has been shown to be beneficial for EVA in the vision domain. adjust_scaling_factors (`bool`): Adjust LoRA scaling factors after the rank redistribution. Setting this to True means the scaling factors are adjusted so that all LoRA gradients have the same scale regardless of their rank. Default is True. """ rho: float = field(default=2.0, metadata={"help": "Rho value for EVA redistribution"}) tau: float = field(default=0.99, metadata={"help": "Cosine similarity threshold for early stopping"}) use_label_mask: bool = field(default=True, metadata={"help": "Use label mask for EVA initialization"}) label_mask_value: int = field( default=-100, metadata={"help": "if use_label_mask=True the value to look for to mask out ignored tokens"} ) whiten: bool = field(default=False, metadata={"help": "Apply whitening to singular vectors"}) adjust_scaling_factors: bool = field( default=True, metadata={"help": "Adjust LoRA scaling factors after the rank redistribution"}, ) def __post_init__(self): if self.rho < 1.0: raise ValueError("`rho` must be >= 1.0") if self.tau < 0.0 or self.tau > 1.0: raise ValueError("`tau` must be between 0.0 and 1.0.") @dataclass class CordaConfig: """ This is the sub-configuration class to store the configuration of a [`LoraModel`]. Args: cache_file (`Optional[str]`): File to store the SVD cache. The SVD cache is much smaller than the residual model (for example, residual model of Llama-3-8b is 15GB, while SVD cache is 1.4GB), but with SVD cache and original model weights, residual model weights can be built quickly. If you need to reuse residual model weights with limited storage, you can store the SVD cache instead. covariance_file (`Optional[str]`): File to store the covariance matrix. If you wish to train multiple models with different ranks, but they sample from the same dataset, you can store the covariance matrix and reuse it for different ranks. Note that covariance file is usually large (comparable to model size), so you will need sufficient storage. corda_method (`Literal["ipm", "kpm"]`): Method to build adapter. The KPM (Knowledge-Preserved Mode) not only achieves better performance than LoRA on fine-tuning tasks, but also mitigates the catastrophic forgetting of pre-trained world knowledge. When preserving pre-trained knowledge is not a concern, the IPM (Instruction-Previewed Mode) is favored because it can further accelerate convergence and enhance the fine-tuning performance. Defaults to `'ipm'`. verbose (`bool`): If true, prints the progress of CorDA initialization. Defaults to `False`. use_float16_for_covariance (`bool`): If true, uses float16 for the covariance matrix. 
This can reduce the memory usage of the covariance matrix by half, but may lead to numerical instability. Defaults to `False`. prune_temporary_fields (`bool`): If true, temporary fields generated in CorDA preprocessing will be pruned. Defaults to `True`. """ cache_file: Optional[str] = field( default=None, metadata={ "help": ( "File to store the SVD cache. The SVD cache is much smaller than the residual model (for example, " "residual model of Llama-3-8b is 15GB, while SVD cache is 1.4GB), but with SVD cache and original model " "weights, residual model weights can be built quickly. If you need to reuse residual model weights with " "limited storage, you can store the SVD cache instead." ) }, ) covariance_file: Optional[str] = field( default=None, metadata={ "help": ( "File to store the covariance matrix. If you wish to train multiple models with different ranks, but " "they sample from the same dataset, you can store the covariance matrix and reuse it for different ranks. " "Note that covariance file is usually large (comparable to model size), so you will need sufficient storage." ) }, ) corda_method: Literal["ipm", "kpm"] = field( default="ipm", metadata={ "help": ( "Method to build adapter. The KPM not only achieves better performance than LoRA on fine-tuning tasks, but " "also mitigates the catastrophic forgetting of pre-trained world knowledge. When preserving pre-trained " "knowledge is not a concern, the IPM is favored because it can further accelerate convergence and enhance " "the fine-tuning performance." ) }, ) verbose: bool = field(default=False, metadata={"help": "If true, prints the progress of CorDA initialization."}) use_float16_for_covariance: bool = field( default=False, metadata={ "help": ( "If true, uses float16 for the covariance matrix. This can reduce the memory usage of the covariance matrix " "by half, but may lead to numerical instability." ) }, ) prune_temporary_fields: bool = field( default=True, metadata={"help": "If true, temporary fields generated in CorDA preprocessing will be pruned."} ) @dataclass class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension (the "rank"). target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen (if the model is a PreTrainedModel, the output layer excluded). If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. lora_alpha (`int`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). 
For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for LoRA. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. use_rslora (`bool`): When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a> which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better. Otherwise, it will use the original default value of `lora_alpha/r`. modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. init_lora_weights (`bool` | `Literal["gaussian", "eva", "olora", "pissa", "pissa_niter_[number of iters]", "corda", "loftq"]`): How to initialize the weights of the adapter layers. Passing True (default) results in the default initialization from the reference implementation from Microsoft, with the LoRA B weight being set to 0. This means that without further training, the LoRA adapter will be a no-op. Setting the initialization to False leads to random initialization of LoRA A and B, meaning that LoRA is not a no-op before training; this setting is intended for debugging purposes. Passing 'gaussian' results in Gaussian initialization scaled by the LoRA rank for linear and layers. Pass `'loftq'` to use LoftQ initialization. Passing `'eva'` results in a data-driven initialization of <ahref='https://arxiv.org/abs/2410.07170' >Explained Variance Adaptation</a>. EVA initalizes LoRA based on the SVD of layer input activations and achieves SOTA performance due to its ability to adapt to the finetuning data. Pass `'olora'` to use OLoRA initialization. Passing `'pissa'` results in the initialization of <ahref='https://arxiv.org/abs/2404.02948' >Principal Singular values and Singular vectors Adaptation (PiSSA)</a>, which converges more rapidly than LoRA and ultimately achieves superior performance. Moreover, PiSSA reduces the quantization error compared to QLoRA, leading to further enhancements. Passing `'pissa_niter_[number of iters]'` initiates Fast-SVD-based PiSSA initialization, where `[number of iters]` indicates the number of subspace iterations to perform FSVD, and must be a nonnegative integer. When `[number of iters]` is set to 16, it can complete the initialization of a 7B model within seconds, and the training effect is approximately equivalent to using SVD. Passing `'corda'` results in the initialization of <ahref='https://arxiv.org/abs/2406.05223' >Context-Oriented Decomposition Adaptation</a>, which converges even more rapidly than PiSSA in Instruction-Previewed Mode, and preserves world knowledge better than LoRA in Knowledge-Preserved Mode. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`Optional[Union[List[str], str]]`): The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`. 
rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. megatron_config (`Optional[dict]`): The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron. The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron. megatron_core (`Optional[str]`): The core module from Megatron to use, defaults to `"megatron.core"`. loftq_config (`Optional[LoftQConfig]`): The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a quantized model in this case, as LoftQ will quantize the model itself. eva_config (`Optional[EvaConfig]`): The configuration of EVA. At a minimum the dataset argument needs to be set (use the same dataset as for finetuning). corda_config (`Optional[CordaConfig]`): The configuration of CorDA. If this is not None, then CorDA will be used to build the adapter layers. Also pass `init_lora_weights='corda'`. use_dora (`bool`): Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, see https://arxiv.org/abs/2402.09353. layer_replication (`List[Tuple[int, int]]`): Build a new stack of layers by stacking the original model layers according to the ranges specified. This allows expanding (or shrinking) the model without duplicating the base model weights. The new layers will all have separate LoRA adapters attached to them. runtime_config (`LoraRuntimeConfig`): Runtime configurations (which are not saved or restored). lora_bias (`bool`): Defaults to `False`. Whether to enable the bias term for the LoRA B parameter. Typically, this should be disabled. The main use case for this is when the LoRA weights were extracted from fully fine-tuned parameters so the bias of those parameters can be taken into account. """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with LoRA." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D " "(if the model is a PreTrainedModel, the output layer excluded)." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." 
), }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from Lora."}, ) lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: Literal["none", "all", "lora_only"] = field( default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"} ) use_rslora: bool = field( default=False, metadata={ "help": ( "When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a>" " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it" " was proven to work better. Otherwise, it will use the original default" " value of `lora_alpha/r`." ) }, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: ( bool | Literal["gaussian", "eva", "olora", "pissa", "pissa_niter_[number of iters]", "corda", "loftq"] ) = field( default=True, metadata={ "help": ( "How to initialize the weights of the LoRA layers. " "Passing True (default) results in the default initialization from the reference implementation from " "Microsoft, with the LoRA B weight being set to 0. This means that without further training, the LoRA " "adapter will be a no-op. " "Setting the initialization to False leads to random initialization of LoRA A and B, meaning that LoRA " "is not a no-op before training; this setting is intended for debugging purposes. " "Passing `'gaussian'` results in Gaussian initialization scaled by the LoRA rank for linear and layers. " "Passing `'eva'` results in a data-driven initialization of Explained Variance Adaptation. " "Passing `'olora'` results in OLoRA initialization. " "Passing `'pissa'` results in PiSSA initialization. " "Passing `'pissa_niter_[number of iters]'` initiates Fast-SVD-based PiSSA initialization, where " "[number of iters] indicates the number of subspace iterations to perform fsvd, and must be a " "nonnegative integer. " "Passing `'corda'` results in CorDA initialization. " "Pass `'loftq'` to use LoftQ initialization." ), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " "This only works when target_modules is a list of str." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." "This only works when target_modules is a list of str. This should target the `nn.ModuleList` of the " "model, which is often called `'layers'` or `'h'`." 
}, ) rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) megatron_config: Optional[dict] = field( default=None, metadata={ "help": ( "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer." "You can get it like this, `core_transformer_config_from_args(get_args())`, " "these two functions being from Megatron." "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and " "RowParallelLinear layers of megatron." "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` " "functions, because TransformerConfig may not necessarily be serialized." "But when using megatron, we can use `get_peft_model_state_dict` function and " "megatron's framework, they can also save and load models and configurations." ) }, ) megatron_core: Optional[str] = field( default="megatron.core", metadata={ "help": ( "The core module from Megatron, it is used to create LoRA's parallel linear layer. " "It only needs to be passed in when you need to use your own modified megatron core module. " "Otherwise, it will use the default value `megatron.core`. " ) }, ) # dict type is used when loading config.json loftq_config: Union[LoftQConfig, dict] = field( default_factory=dict, metadata={ "help": ( "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone " "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case." ) }, ) eva_config: Optional[EvaConfig] = field( default=None, metadata={ "help": ( "The configuration of EVA. If this is passed, then EVA will be used to intialize the LoRA layers. " "Also set `init_lora_weights='eva'` in this case. " ) }, ) corda_config: Optional[CordaConfig] = field( default=None, metadata={ "help": ( "The configuration of CorDA. If this is passed, then CorDA will be used to build the adapter layers. " "Also set `init_lora_weights='corda'` in this case." ) }, ) use_dora: bool = field( default=False, metadata={ "help": ( "Enable <a href='https://arxiv.org/abs/2402.09353'>'Weight-Decomposed Low-Rank Adaptation' (DoRA)</a>. This technique decomposes the updates of the " "weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the " "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, " "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger" "overhead than pure LoRA, so it is recommended to merge weights for inference." ) }, ) # Enables replicating layers in a model to expand it to a larger model. layer_replication: Optional[list[tuple[int, int]]] = field( default=None, metadata={ "help": ( "This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. " "The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with " "a module list in the model which it modifies to expand the number of modules. 
" "Base weights are shared so the memory usage is close to the original model. The intended use is these base weights " "remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialed via " "the adapter layers fit during fine tuning." "The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n" " Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n" " layer_replication: `[[0, 4], [2, 5]]`\n" " Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\n" "This format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential " "ranges of a model and stack them while reusing layers at either end of each sequence." ) }, ) runtime_config: LoraRuntimeConfig = field( default_factory=LoraRuntimeConfig, metadata={"help": "Runtime configurations"} ) lora_bias: bool = field( default=False, metadata={ "help": ( "Whether to enable the bias term for the LoRA B parameter. Typically, this should be disabled. The " "main use case for this is when the LoRA weights were extracted from fully fine-tuned parameters so " "the bias of those parameters can be taken into account." ) }, ) def to_dict(self): """ Returns the configuration for your adapter model as a dictionary. Removes runtime configurations. """ rv = super().to_dict() rv.pop("runtime_config") return rv def __post_init__(self): super().__post_init__() self.peft_type = PeftType.LORA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ") if self.use_dora and self.megatron_config: raise ValueError("DoRA does not support megatron_core, please set `use_dora=False`.") # handle init_lora_weights and loftq_config if self.init_lora_weights == "loftq": import importlib if not importlib.util.find_spec("scipy"): raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") if not self.loftq_config: raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.") if not isinstance(self.loftq_config, dict): # convert loftq_config to dict self.loftq_config = vars(self.loftq_config) elif self.loftq_config: self.loftq_config = {} warnings.warn("`loftq_config` specified but will be ignored when `init_lora_weights` is not 'loftq'.") elif self.init_lora_weights == "eva" and self.eva_config is None: warnings.warn("`init_lora_weights` is 'eva' but `eva_config` is not specified. 
Using default EVA config.") self.eva_config = EvaConfig() elif self.init_lora_weights != "eva" and self.eva_config is not None: warnings.warn("`eva_config` specified but will be ignored when `init_lora_weights` is not 'eva'.") elif self.init_lora_weights == "corda" and self.corda_config is None: warnings.warn( "`init_lora_weights` is 'corda' but `corda_config` is not specified. Using default CorDA config." ) self.corda_config = CordaConfig() elif self.init_lora_weights != "corda" and self.corda_config is not None: warnings.warn("`corda_config` specified but will be ignored when `init_lora_weights` is not 'corda'.") if self.lora_bias: if self.init_lora_weights not in (True, False): raise ValueError( f"The argument lora_bias=True is only supported with init_lora_weights=True or False, got " f"init_lora_weights={self.init_lora_weights} instead." ) if self.use_dora: raise ValueError("The argument lora_bias=True is not supported for DoRA, please pass use_dora=False") # Using post training conversion of modified base weights to restore their initial values PiSSA/CorDA/OLoRA cannot # be correctly done when using rslora + rank_pattern/alpha_pattern. We can't really know if the user intends # this when they'll eventually call save_pretrained (i.e. if they'll pass # path_initial_model_for_weight_conversionl). Therefore, we only warn but don't raise an error here. if ( self.use_rslora and (self.rank_pattern or self.alpha_pattern) and ( (isinstance(self.init_lora_weights, str) and (self.init_lora_weights.startswith("pissa"))) or (self.init_lora_weights == "olora") or (self.init_lora_weights == "corda") ) ): msg = ( "Using Rank-Stabilized LoRA with rank_pattern/alpha_pattern and post-training conversion of modified " "base weights PiSSA/CorDA/OLoRA means that you won't be able to pass " "`path_initial_model_for_weight_conversion` to `save_pretrained` to restore the initial values of the " "base weights; if you intend to do this, please ensure not to use rslora or rank_pattern/alpha_pattern." ) warnings.warn(msg) self._custom_modules: Optional[dict[type[nn.Module], type[nn.Module]]] = None def _register_custom_module(self, mapping: dict[type[nn.Module], type[nn.Module]]) -> None: """ Experimental API to support providing custom LoRA layers. This API is subject to change, you should carefully read the docs before deciding to use it: https://huggingface.co/docs/peft/developer_guides/custom_models To register custom LoRA module types, call this method with a `mapping` argument that is a dict that maps from the target layer type to the custom LoRA layer type. The dict can contain multiple items if you wish to target multiple layer types. The target layer type can be any nn.Module that we currently don't support in PEFT, whether that is an official PyTorch layer type or a custom layer type. The custom LoRA module class has to be implemented by the user and follow the PEFT conventions for LoRA layers. """ if self._custom_modules is None: self._custom_modules = {} self._custom_modules.update(mapping)
peft/src/peft/tuners/lora/config.py/0
{ "file_path": "peft/src/peft/tuners/lora/config.py", "repo_id": "peft", "token_count": 13800 }
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from peft.tuners.prompt_tuning import PromptEmbedding from peft.utils import TaskType from peft.utils.save_and_load import torch_load from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit # This code is adapted for the paper: https://arxiv.org/abs/2303.02861 and # constitutes the work done at MIT-IBM Watson Research Lab. class MultitaskPromptEmbedding(PromptEmbedding): def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings): super().__init__(config, word_embeddings) self.num_tasks = config.num_tasks self.num_ranks = config.num_ranks self.num_virtual_tokens = config.num_virtual_tokens self.num_transformer_submodules = config.num_transformer_submodules if self.num_transformer_submodules is None: self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 self.token_dim = config.token_dim total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules self.prefix_task_cols = torch.nn.Parameter( torch.normal( mean=0, std=0.02, size=(self.num_tasks, total_virtual_tokens, self.num_ranks), ) ) self.prefix_task_rows = torch.nn.Parameter( torch.normal( mean=0, std=0.02, size=(self.num_tasks, self.num_ranks, self.token_dim), ) ) if config.prompt_tuning_init in [ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, MultitaskPromptTuningInit.ONLY_SOURCE_SHARED, ]: if config.prompt_tuning_init_state_dict_path is None: raise ValueError( f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} " "init method" ) if config.prompt_tuning_init_state_dict_path.endswith(".safetensors"): from safetensors.torch import load_file state_dict: dict = load_file(config.prompt_tuning_init_state_dict_path) else: state_dict: dict = torch_load( config.prompt_tuning_init_state_dict_path, map_location=word_embeddings.weight.device, ) if config.prompt_tuning_init in [ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, ]: prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"] prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"] if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS: prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True) prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True) elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK: prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0) prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0) state_dict = { "embedding.weight": state_dict["prompt_embeddings"], "prefix_task_cols": prefix_task_cols_, "prefix_task_rows": prefix_task_rows_, } self.load_state_dict(state_dict, strict=True) elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED: state_dict = { "embedding.weight": state_dict["prompt_embeddings"], } 
self.load_state_dict(state_dict, strict=False) def forward(self, indices, task_ids): if task_ids is None: raise ValueError("task_ids cannot be None") prompt_embeddings = self.embedding(indices) task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids) task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids) task_prompts = torch.matmul(task_cols, task_rows) prompt_embeddings *= task_prompts return prompt_embeddings
peft/src/peft/tuners/multitask_prompt_tuning/model.py/0
{ "file_path": "peft/src/peft/tuners/multitask_prompt_tuning/model.py", "repo_id": "peft", "token_count": 2251 }
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import Any, Callable, Optional import torch import torch.nn as nn from torch import Tensor from peft.tuners import lora from .config import XLoraConfig class XLoraLayer: """ A XLoraLayer wraps any LoraLayer and performs the XLora operation on the LoRA adaptors specified. Its primary API is the forward method, which uses the scalings to execute the XLora algorithm. """ def __init__( self, model: nn.Module, # XLoraModel target: lora.LoraLayer, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig, ) -> None: self.model = model self.target_forward = target_forward self.target = target self.layer_number = layer_number self.config = config """ Apply the scalings for the adapter. """ @staticmethod def apply_scalings_to_x(x: torch.Tensor, scalings_layer: torch.Tensor, adapter: int) -> torch.Tensor: # scalings_layer = [batch_size, seq_len, n_classes] scalings = scalings_layer[:, :, adapter].unsqueeze(-1) # scalings_layer = [batch_size, seq_len, 1] return x * scalings """ Get the scalings for this layer, potentially applying topk and topk+softmax. This is called before `apply_scalings_to_x` """ def get_maybe_topk_scalings(self, scalings) -> torch.Tensor: # xlora_scalings = [batch_size, seq_len, n_classes] xlora_scalings: Tensor = scalings[:, :, self.layer_number, :] # type: ignore if self.config.top_k_lora is not None: _, topk_indices = torch.topk(xlora_scalings, k=self.config.top_k_lora, dim=-1) # Mask the topk to True, the rest to False mask = torch.zeros_like(xlora_scalings, dtype=torch.bool) mask.scatter_(-1, topk_indices, True) xlora_scalings = xlora_scalings * mask.to(xlora_scalings.dtype) if self.config.enable_softmax_topk: nonzero_mask = xlora_scalings != 0 softmax_res_nonzero = torch.softmax(xlora_scalings[nonzero_mask], dim=-1) xlora_scalings[nonzero_mask] = softmax_res_nonzero return xlora_scalings class XLoraLinearLayer(XLoraLayer): def __init__( self, model: nn.Module, target: lora.Linear, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig, ) -> None: super().__init__(model, target, target_forward, layer_number, config) def forward(self, x: Tensor, *args: Any, scalings: Optional[Tensor] = None, **kwargs: Any) -> Tensor: """ This method is designed to be a drop-in-replacement for the LoRA layers' .forward method. To use it, a bound method must be created (bound to an instance of the XLoraLayer class). """ previous_dtype = x.dtype if scalings is not None: xlora_scalings = self.get_maybe_topk_scalings(scalings) result = self.target.base_layer(x, *args, **kwargs) # Ignore if disabled. We want to make sure this is always run. 
if not self.target.merged: for adapter_n, active_adapter in enumerate(self.target.active_adapters): # TODO: implement X-LoRA with Lora+Dora layers if self.target.use_dora[active_adapter]: raise ValueError("X-LoRA currently does not support LoRA layers with DoRA") if active_adapter not in self.target.lora_A.keys(): continue lora_A = self.target.lora_A[active_adapter] lora_B = self.target.lora_B[active_adapter] dropout = self.target.lora_dropout[active_adapter] scaling = self.target.scaling[active_adapter] x = x.to(lora_A.weight.dtype) # type: ignore if scalings is not None: x_mod = self.apply_scalings_to_x(x, xlora_scalings, adapter_n) scaling_weight = self.config.global_scaling_weight else: x_mod = x scaling_weight = 1 result += lora_B(lora_A(dropout(x_mod))) * scaling * scaling_weight result = result.to(previous_dtype) return result class XLoraEmbeddingLayer(XLoraLayer): def __init__( self, model: nn.Module, target: lora.Embedding, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig, ) -> None: super().__init__(model, target, target_forward, layer_number, config) def forward(self, x: Tensor, *args: Any, scalings: Optional[Tensor] = None, **kwargs: Any) -> Tensor: """ This method is designed to be a drop-in-replacement for the LoRA layers' .forward method. To use it, a bound method must be created (bound to an instance of the XLoraLayer class). """ if scalings is not None: xlora_scalings = self.get_maybe_topk_scalings(scalings) result = self.target.base_layer(x, *args, **kwargs) # Ignore if disabled. We want to make sure this is always run. if not self.target.merged: for adapter_n, active_adapter in enumerate(self.target.active_adapters): # TODO: implement X-LoRA with Lora+Dora layers if self.target.use_dora.get(active_adapter, False): raise ValueError("X-LoRA currently does not support LoRA layers with DoRA") if active_adapter not in self.target.lora_embedding_A: continue embedding_A = self.target.lora_embedding_A[active_adapter].T embedding_B = self.target.lora_embedding_B[active_adapter].T scaling = self.target.scaling[active_adapter] after_A = self.target._embed(x, embedding_A) # type: ignore if scalings is not None: after_A_mod = self.apply_scalings_to_x(after_A, xlora_scalings, adapter_n) scaling_weight = self.config.global_scaling_weight else: after_A_mod = after_A scaling_weight = 1 result += (after_A_mod @ embedding_B) * scaling * scaling_weight return result class XLoraConv2dLayer(XLoraLayer): def __init__( self, model: nn.Module, target: lora.Conv2d, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig, ) -> None: super().__init__(model, target, target_forward, layer_number, config) def forward(self, x: Tensor, *args: Any, scalings: Optional[Tensor] = None, **kwargs: Any) -> Tensor: """ This method is designed to be a drop-in-replacement for the LoRA layers' .forward method. To use it, a bound method must be created (bound to an instance of the XLoraLayer class). """ previous_dtype = x.dtype if scalings is not None: xlora_scalings = self.get_maybe_topk_scalings(scalings) result = self.target.base_layer(x, *args, **kwargs) # Ignore if disabled. We want to make sure this is always run. 
if not self.target.merged: for adapter_n, active_adapter in enumerate(self.target.active_adapters): # TODO: implement X-LoRA with Lora+Dora layers if self.target.use_dora[active_adapter]: raise ValueError("X-LoRA currently does not support LoRA layers with DoRA") if active_adapter not in self.target.lora_A.keys(): continue lora_A = self.target.lora_A[active_adapter] lora_B = self.target.lora_B[active_adapter] dropout = self.target.lora_dropout[active_adapter] scaling = self.target.scaling[active_adapter] x = x.to(lora_A.weight.dtype) # type: ignore if scalings is not None: x_mod = self.apply_scalings_to_x(x, xlora_scalings, adapter_n) scaling_weight = self.config.global_scaling_weight else: x_mod = x scaling_weight = 1 result += lora_B(lora_A(dropout(x_mod))) * scaling * scaling_weight result = result.to(previous_dtype) return result
peft/src/peft/tuners/xlora/layer.py/0
{ "file_path": "peft/src/peft/tuners/xlora/layer.py", "repo_id": "peft", "token_count": 4097 }
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import tempfile import unittest from unittest import TestCase import pytest import torch from torch.testing import assert_close from peft import get_peft_model from peft.peft_model import PeftModel from peft.tuners.adaption_prompt import AdaptionPromptConfig from peft.utils.other import prepare_model_for_kbit_training from peft.utils.save_and_load import get_peft_model_state_dict from tests.testing_common import PeftCommonTester def is_llama_available() -> bool: """Check if Llama is available in the transformers library (it's not in earlier versions).""" try: return importlib.util.find_spec("transformers.models.llama.modeling_llama") is not None except ModuleNotFoundError: return False def is_mistral_available() -> bool: """Check if mistral is available in the transformers library (it's not in earlier versions).""" try: return importlib.util.find_spec("transformers.models.mistral.modeling_mistral") is not None except ModuleNotFoundError: return False if is_llama_available(): # We guard the import statement so that our unit tests will pass in CI environments # that don't have a transformers package with Llama. from transformers import LlamaConfig, LlamaForCausalLM, LlamaModel if is_mistral_available(): # We guard the import statement so that our unit tests will pass in CI environments # that don't have a transformers package with Mistral. from transformers import MistralConfig, MistralForCausalLM, MistralModel class AdaptionPromptTester(TestCase, PeftCommonTester): """ Tests for the AdaptionPrompt model. Some of these tests were adapted from `test_peft_model.py` (which has been refactored since), but since we haven't checked in the test checkpoints for Llama into `hf-internal-testing`, we separate them for now. """ def setUp(self): # Check that llama is available in transformers package before running each test. if not is_llama_available(): self.skipTest("Llama not available in transformers. Skipping all tests.") else: # Check for Mistral's availability. It might or might not be available. 
self.mistral_available = is_mistral_available() @staticmethod def _create_test_llama_config(): """Create a test config for a small Llama model for testing.""" return LlamaConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, use_cache=False, ) @staticmethod def _create_test_mistral_config(): """Create a test config for a small Mistral model for testing.""" return MistralConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, num_key_value_heads=2, use_cache=False, ) def test_attributes(self) -> None: model = LlamaModel(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4) model = get_peft_model(model, config) assert hasattr(model, "save_pretrained") assert hasattr(model, "from_pretrained") assert hasattr(model, "push_to_hub") @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_attributes_mistral(self) -> None: model_mistral = MistralModel(self._create_test_mistral_config()) config_mistral = AdaptionPromptConfig(adapter_layers=1, adapter_len=4) model_mistral = get_peft_model(model_mistral, config_mistral) assert hasattr(model_mistral, "save_pretrained") assert hasattr(model_mistral, "from_pretrained") assert hasattr(model_mistral, "push_to_hub") def test_prepare_for_training(self) -> None: # Test Llama model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) assert not dummy_output.requires_grad @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_prepare_for_training_mistral(self) -> None: model_mistral = MistralForCausalLM(self._create_test_mistral_config()) config_mistral = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model_mistral = get_peft_model(model_mistral, config_mistral) model_mistral = model_mistral.to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model_mistral.get_input_embeddings()(dummy_input) assert not dummy_output.requires_grad def test_prepare_for_int8_training(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) model = prepare_model_for_kbit_training(model) model = model.to(self.torch_device) for param in model.parameters(): assert not param.requires_grad config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) assert dummy_output.requires_grad @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_prepare_model_for_kbit_training_mistral(self) -> None: model_mistral = MistralForCausalLM(self._create_test_mistral_config()) model_mistral = prepare_model_for_kbit_training(model_mistral) model_mistral = model_mistral.to(self.torch_device) for param in model_mistral.parameters(): assert not param.requires_grad 
config_mistral = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model_mistral = get_peft_model(model_mistral, config_mistral) # For backward compatibility if hasattr(model_mistral, "enable_input_require_grads"): model_mistral.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model_mistral.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model_mistral.get_input_embeddings()(dummy_input) assert dummy_output.requires_grad def test_save_pretrained_regression(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=False) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). assert len(state_dict) == 4 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_save_pretrained_regression_mistral(self) -> None: seed = 420 torch.manual_seed(seed) model_mistral = MistralForCausalLM(self._create_test_mistral_config()) config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model_mistral = get_peft_model(model_mistral, config_mistral) model_mistral = model_mistral.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model_mistral.save_pretrained(tmp_dirname, safe_serialization=False) torch.manual_seed(seed) model_from_pretrained_mistral = MistralForCausalLM(self._create_test_mistral_config()) model_from_pretrained_mistral = PeftModel.from_pretrained(model_from_pretrained_mistral, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model_mistral) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained_mistral) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). 
assert len(state_dict) == 4 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) def test_save_pretrained(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). assert len(state_dict) == 4 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_save_pretrained_mistral(self) -> None: seed = 420 torch.manual_seed(seed) model_mistral = MistralForCausalLM(self._create_test_mistral_config()) config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model_mistral = get_peft_model(model_mistral, config_mistral) model_mistral = model_mistral.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model_mistral.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained_mistral = MistralForCausalLM(self._create_test_mistral_config()) model_from_pretrained_mistral = PeftModel.from_pretrained(model_from_pretrained_mistral, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model_mistral) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained_mistral) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). 
assert len(state_dict) == 4 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) def test_save_pretrained_selected_adapters(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) new_adapter_config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model.add_adapter("new_adapter", new_adapter_config) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) model_from_pretrained.load_adapter(tmp_dirname, "new_adapter") # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). 
assert len(state_dict) == 4 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_save_pretrained_selected_adapters_mistral(self) -> None: seed = 420 torch.manual_seed(seed) model_mistral = MistralForCausalLM(self._create_test_mistral_config()) config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model_mistral = get_peft_model(model_mistral, config_mistral) model_mistral = model_mistral.to(self.torch_device) new_adapter_config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model_mistral.add_adapter("new_adapter", new_adapter_config_mistral) with tempfile.TemporaryDirectory() as tmp_dirname: model_mistral.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained_mistral = MistralForCausalLM(self._create_test_mistral_config()) model_from_pretrained_mistral = PeftModel.from_pretrained(model_from_pretrained_mistral, tmp_dirname) model_from_pretrained_mistral.load_adapter(tmp_dirname, "new_adapter") # check if the state dicts are equal state_dict = get_peft_model_state_dict(model_mistral) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained_mistral) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). 
assert len(state_dict) == 4 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) def test_generate(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask) # check if `generate` works if positional arguments are passed _ = model.generate(input_ids, attention_mask=attention_mask) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_generate_mistral(self) -> None: model_mistral = MistralForCausalLM(self._create_test_mistral_config()) config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model_mistral = get_peft_model(model_mistral, config_mistral) model_mistral = model_mistral.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model_mistral.generate(input_ids=input_ids, attention_mask=attention_mask) # check if `generate` works if positional arguments are passed _ = model_mistral.generate(input_ids, attention_mask=attention_mask) def test_sequence_adapter_ops(self) -> None: """Test sequence of adapter operations.""" # Test input data. input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # Create original llama model. original = LlamaForCausalLM(self._create_test_llama_config()) original = original.to(self.torch_device) original_before = original(input_ids=input_ids, attention_mask=attention_mask) # Get AdaptionPrompt model. adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) default_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) # Test zero-init: The logits should be exactly the same. assert_close(original_before.logits, default_before.logits, rtol=0, atol=0) # Single fine-tuning step on "default" adapter. optimizer = torch.optim.SGD(adapted.parameters(), lr=1) optimizer.zero_grad() default_before.loss.backward() optimizer.step() # Test that the output changed. default_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert not torch.allclose(default_before.logits, default_after.logits) with adapted.disable_adapter(): # Test that the output is the same as the original output. 
default_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0) # Add new adapter 1. adapted.add_adapter("adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM")) # Test zero-init adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0) # Single fine-tuning step on adapter 1. optimizer = torch.optim.SGD(adapted.parameters(), lr=1) optimizer.zero_grad() adapter_1_before.loss.backward() optimizer.step() # Test that adapter 1 output changed. adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert not torch.allclose(adapter_1_before.logits, adapter_1_after.logits) assert not torch.allclose(original_before.logits, adapter_1_after.logits) assert not torch.allclose(default_after.logits, adapter_1_after.logits) with adapted.disable_adapter(): # Test that the output is the same as the original output. adapter_1_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0) # Set adapter back to default. adapted.set_adapter("default") # Test that the output is the same as the default output after training. default_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0) assert not torch.allclose(original_before.logits, default_after_set.logits) assert not torch.allclose(adapter_1_after.logits, default_after_set.logits) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_sequence_adapter_ops_mistral(self) -> None: # Test input data. input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # Create original mistral model. model_mistral = MistralForCausalLM(self._create_test_mistral_config()) model_mistral = model_mistral.to(self.torch_device) original_before = model_mistral(input_ids=input_ids, attention_mask=attention_mask) # Get AdaptionPrompt model. adapted_mistral = get_peft_model( model_mistral, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted_mistral = adapted_mistral.to(self.torch_device) default_before = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) # Test zero-init: The logits should be exactly the same. assert_close(original_before.logits, default_before.logits, rtol=0, atol=0) # Single fine-tuning step on "default" adapter. optimizer = torch.optim.SGD(adapted_mistral.parameters(), lr=1) optimizer.zero_grad() default_before.loss.backward() optimizer.step() # Test that the output changed. default_after = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert not torch.allclose(default_before.logits, default_after.logits) with adapted_mistral.disable_adapter(): # Test that the output is the same as the original output. default_disabled = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0) # Add new adapter 1. 
adapted_mistral.add_adapter( "adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM") ) # Test zero-init adapter_1_before = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0) # Single fine-tuning step on adapter 1. optimizer = torch.optim.SGD(adapted_mistral.parameters(), lr=1) optimizer.zero_grad() adapter_1_before.loss.backward() optimizer.step() # Test that adapter 1 output changed. adapter_1_after = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert not torch.allclose(adapter_1_before.logits, adapter_1_after.logits) assert not torch.allclose(original_before.logits, adapter_1_after.logits) assert not torch.allclose(default_after.logits, adapter_1_after.logits) with adapted_mistral.disable_adapter(): # Test that the output is the same as the original output. adapter_1_disabled = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0) # Set adapter back to default. adapted_mistral.set_adapter("default") # Test that the output is the same as the default output after training. default_after_set = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0) assert not torch.allclose(original_before.logits, default_after_set.logits) assert not torch.allclose(adapter_1_after.logits, default_after_set.logits) def test_add_and_set_while_disabled(self): """Test that adding and setting adapters while disabled works as intended.""" # Test input data. input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # Create original llama model. original = LlamaForCausalLM(self._create_test_llama_config()) original = original.to(self.torch_device) original_before = original(input_ids=input_ids, attention_mask=attention_mask) # Get AdaptionPrompt model. adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) with adapted.disable_adapter(): adapted.add_adapter( "adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM") ) # Test that the output is the same as the original output. adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0) # Single fine-tuning step on adapter 1. optimizer = torch.optim.SGD(adapted.parameters(), lr=1) optimizer.zero_grad() adapter_1_before.loss.backward() optimizer.step() # Test that adapter 1 output changed. adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert not torch.allclose(original_before.logits, adapter_1_after.logits) adapted.set_adapter("default") with adapted.disable_adapter(): adapted.set_adapter("adapter 1") # Test that adapter 1 is active again. 
adapter_1_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_add_and_set_while_disabled_mistral(self): # Test input data. input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # Create original mistral model. model_mistral = MistralForCausalLM(self._create_test_mistral_config()) model_mistral = model_mistral.to(self.torch_device) original_before = model_mistral(input_ids=input_ids, attention_mask=attention_mask) # Get AdaptionPrompt model. adapted_mistral = get_peft_model( model_mistral, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted_mistral = adapted_mistral.to(self.torch_device) with adapted_mistral.disable_adapter(): adapted_mistral.add_adapter( "adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM") ) # Test that the output is the same as the original output. adapter_1_before = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0) # Single fine-tuning step on adapter 1. optimizer = torch.optim.SGD(adapted_mistral.parameters(), lr=1) optimizer.zero_grad() adapter_1_before.loss.backward() optimizer.step() # Test that adapter 1 output changed. adapter_1_after = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert not torch.allclose(original_before.logits, adapter_1_after.logits) adapted_mistral.set_adapter("default") with adapted_mistral.disable_adapter(): adapted_mistral.set_adapter("adapter 1") # Test that adapter 1 is active again. adapter_1_after_set = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0) def test_use_cache(self) -> None: """Test that AdaptionPrompt works when Llama config use_cache=True.""" torch.manual_seed(0) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) original = LlamaForCausalLM( LlamaConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, use_cache=False, ) ).eval() adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) expected = adapted.generate(input_ids=input_ids, max_length=8) # Set use_cache = True and generate output again. 
adapted.base_model.config.use_cache = True actual = adapted.generate(input_ids=input_ids, max_length=8) assert_close(expected, actual, rtol=0, atol=0) @unittest.skipIf(not is_mistral_available(), "Mistral is not available") def test_use_cache_mistral(self) -> None: torch.manual_seed(0) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) original = MistralForCausalLM( MistralConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, num_key_value_heads=2, use_cache=False, ) ).eval() adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) expected = adapted.generate(input_ids=input_ids, max_length=8) # Set use_cache = True and generate output again. adapted.base_model.config.use_cache = True actual = adapted.generate(input_ids=input_ids, max_length=8) assert_close(expected, actual, rtol=0, atol=0) def test_bf16_inference(self) -> None: if self.torch_device == "mps": return pytest.skip("Skipping bf16 test on MPS") """Test that AdaptionPrompt works when Llama using a half-precision model.""" input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) original = LlamaForCausalLM.from_pretrained( "trl-internal-testing/tiny-random-LlamaForCausalLM", torch_dtype=torch.bfloat16 ) adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) _ = adapted.generate(input_ids=input_ids) @unittest.expectedFailure def test_disable_adapter(self): llama_config = self._create_test_llama_config() model = LlamaForCausalLM(llama_config).to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) output_before = model(dummy_input).logits config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config).to(self.torch_device) output_peft = model(dummy_input).logits # TODO currently this fails because scores are zeroed out: # https://github.com/huggingface/peft/blob/062d95a09eb5d1de35c0e5e23d4387daba99e2db/src/peft/tuners/adaption_prompt.py#L303 # This is fine for users but makes it difficult to test if anything happens. In the future, we will have a clean # way to control initialization. Until then, this test is expected to fail. assert not torch.allclose(output_before, output_peft) with model.disable_adapter(): output_peft_disabled = model(dummy_input).logits assert torch.allclose(output_before, output_peft_disabled)
peft/tests/test_adaption_prompt.py/0
{ "file_path": "peft/tests/test_adaption_prompt.py", "repo_id": "peft", "token_count": 16292 }
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from peft.utils.integrations import init_empty_weights, skip_init_on_device class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 2, bias=bias) def get_mlp(): return MLP() class TestInitEmptyWeights: def test_init_empty_weights_works(self): # this is a very rudimentary test, as init_empty_weights is copied almost 1:1 from accelerate and is tested # there with init_empty_weights(): mlp = get_mlp() expected = torch.device("meta") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_works(self): # when a function is decorated with skip_init_on_device, the parameters are not moved to meta device, even when # inside the context decorated_fn = skip_init_on_device(get_mlp) with init_empty_weights(): mlp = decorated_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_works_outside_context(self): # same as before, but ensure that skip_init_on_device does not break when no init_empty_weights context is used decorated_fn = skip_init_on_device(get_mlp) mlp = decorated_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_not_permanent(self): # ensure that after skip_init_on_device has been used, init_empty_weights reverts to its original functionality # with decorator => cpu decorated_fn = skip_init_on_device(get_mlp) with init_empty_weights(): mlp = decorated_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp.parameters()) # without decorator => meta with init_empty_weights(): mlp = get_mlp() expected = torch.device("meta") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_nested(self): # ensure that skip_init_on_device works even if the decorated function is nested inside another decorated # function @skip_init_on_device def outer_fn(): @skip_init_on_device def inner_fn(): return get_mlp() mlp0 = inner_fn() mlp1 = get_mlp() return mlp0, mlp1 with init_empty_weights(): mlp0, mlp1 = outer_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp0.parameters()) assert all(p.device == expected for p in mlp1.parameters())
peft/tests/test_integrations.py/0
{ "file_path": "peft/tests/test_integrations.py", "repo_id": "peft", "token_count": 1367 }
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import pickle import re import shutil import tempfile import warnings from collections import OrderedDict from dataclasses import replace import pytest import torch import yaml from diffusers import StableDiffusionPipeline from packaging import version from safetensors.torch import save_file from peft import ( AdaLoraConfig, BOFTConfig, BoneConfig, CPTConfig, FourierFTConfig, HRAConfig, IA3Config, LNTuningConfig, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, PeftModel, PeftType, PrefixTuningConfig, PromptEncoderConfig, PromptLearningConfig, PromptTuningConfig, VBLoRAConfig, VeraConfig, get_peft_model, get_peft_model_state_dict, inject_adapter_in_model, prepare_model_for_kbit_training, ) from peft.tuners.lora import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import _get_submodules, infer_device from .testing_utils import get_state_dict CONFIG_TESTING_KWARGS = ( # IA³ { "target_modules": None, "feedforward_modules": None, }, # LoRA { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", }, # prefix tuning { "num_virtual_tokens": 10, }, # prompt encoder { "num_virtual_tokens": 10, "encoder_hidden_size": 32, }, # prompt tuning { "num_virtual_tokens": 10, }, # AdaLoRA { "target_modules": None, "total_step": 1, }, # BOFT { "target_modules": None, }, # VeRA { "r": 8, "target_modules": None, "vera_dropout": 0.05, "projection_prng_key": 0xFF, "d_initial": 0.1, "save_projection": True, "bias": "none", }, # FourierFT { "n_frequency": 10, "target_modules": None, }, # HRA { "target_modules": None, }, # VBLoRA {"target_modules": None, "vblora_dropout": 0.05, "vector_length": 1, "num_vectors": 2}, # OFT { "target_modules": None, }, # Bone { "target_modules": None, "r": 2, }, # CPT tuninig { "cpt_token_ids": [0, 1, 2, 3, 4, 5, 6, 7], # Example token IDs for testing "cpt_mask": [1, 1, 1, 1, 1, 1, 1, 1], "cpt_tokens_type_mask": [1, 2, 2, 2, 3, 3, 4, 4], }, ) CLASSES_MAPPING = { "ia3": (IA3Config, CONFIG_TESTING_KWARGS[0]), "lora": (LoraConfig, CONFIG_TESTING_KWARGS[1]), "prefix_tuning": (PrefixTuningConfig, CONFIG_TESTING_KWARGS[2]), "prompt_encoder": (PromptEncoderConfig, CONFIG_TESTING_KWARGS[3]), "prompt_tuning": (PromptTuningConfig, CONFIG_TESTING_KWARGS[4]), "adalora": (AdaLoraConfig, CONFIG_TESTING_KWARGS[5]), "boft": (BOFTConfig, CONFIG_TESTING_KWARGS[6]), "vera": (VeraConfig, CONFIG_TESTING_KWARGS[7]), "fourierft": (FourierFTConfig, CONFIG_TESTING_KWARGS[8]), "hra": (HRAConfig, CONFIG_TESTING_KWARGS[9]), "vblora": (VBLoRAConfig, CONFIG_TESTING_KWARGS[10]), "oft": (OFTConfig, CONFIG_TESTING_KWARGS[11]), "bone": (BoneConfig, CONFIG_TESTING_KWARGS[12]), } DECODER_MODELS_EXTRA = {"cpt": (CPTConfig, CONFIG_TESTING_KWARGS[13])} # Adapted from https://github.com/huggingface/transformers/blob/48327c57182fdade7f7797d1eaad2d166de5c55b/src/transformers/activations.py#LL166C7-L166C22 class ClassInstantier(OrderedDict): def 
__getitem__(self, key, *args, **kwargs): # check if any of the kwargs is inside the config class kwargs if any(kwarg in self[key][1] for kwarg in kwargs): new_config_kwargs = self[key][1].copy() new_config_kwargs.update(kwargs) return (self[key][0], new_config_kwargs) return super().__getitem__(key, *args, **kwargs) def get_grid_parameters(self, grid_parameters, filter_params_func=None): r""" Returns a list of all possible combinations of the parameters in the config classes. Args: grid_parameters (`dict`): A dictionary containing the parameters to be tested. There should be at least the key "model_ids" which contains a list of model ids to be tested. The other keys should be the name of the config class post-fixed with "_kwargs" and the value should be a dictionary containing the parameters to be tested for that config class. filter_params_func (`callable`, `optional`): A function that takes a list of tuples and returns a list of tuples. This function is used to filter out the tests that needs for example to be skipped. Returns: generated_tests (`list`): A list of tuples containing the name of the test, the model id, the config class and the config class kwargs. """ generated_tests = [] model_list = grid_parameters["model_ids"] task_type = grid_parameters["task_type"] if "task_type" in grid_parameters else None for model_id in model_list: for key, value in self.items(): if f"{key}_kwargs" in grid_parameters: peft_configs = [] current_peft_config = value[1].copy() for current_key, current_value in grid_parameters[f"{key}_kwargs"].items(): for kwarg in current_value: current_peft_config.update({current_key: kwarg}) if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs.append(current_peft_config.copy()) else: current_peft_config = value[1].copy() if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs = [current_peft_config] for peft_config in peft_configs: generated_tests.append((f"test_{model_id}_{key}", model_id, value[0], peft_config)) if filter_params_func is not None: generated_tests = filter_params_func(generated_tests) return generated_tests PeftTestConfigManager = ClassInstantier(CLASSES_MAPPING) PeftTestConfigManagerForDecoderModels = ClassInstantier({**CLASSES_MAPPING, **DECODER_MODELS_EXTRA}) class PeftCommonTester: r""" A large testing suite for testing common functionality of the PEFT models. Attributes: torch_device (`torch.device`): The device on which the tests will be run. transformers_class (`transformers.PreTrainedModel`): The transformers class that is being tested. 
""" torch_device = infer_device() transformers_class = None def prepare_inputs_for_common(self): raise NotImplementedError def check_modelcard(self, tmp_dirname, model): # check the generated README.md filename = os.path.join(tmp_dirname, "README.md") assert os.path.exists(filename) with open(filename, encoding="utf-8") as f: readme = f.read() metainfo = re.search(r"---\n(.*?)\n---", readme, re.DOTALL).group(1) dct = yaml.safe_load(metainfo) assert dct["library_name"] == "peft" if hasattr(model, "config"): assert dct["base_model"] == model.config.to_dict()["_name_or_path"] else: # a custom model assert "base_model" not in dct def check_config_json(self, tmp_dirname, model): # check the generated config.json filename = os.path.join(tmp_dirname, "adapter_config.json") assert os.path.exists(filename) with open(filename, encoding="utf-8") as f: config = json.load(f) if hasattr(model, "config"): # custom models don't have a config attribute assert config["base_model_name_or_path"] == model.config.to_dict()["_name_or_path"] def _test_model_attr(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) assert hasattr(model, "save_pretrained") assert hasattr(model, "from_pretrained") assert hasattr(model, "push_to_hub") def _test_adapter_name(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter") correctly_converted = False for n, _ in model.named_parameters(): if "test-adapter" in n: correctly_converted = True break assert correctly_converted def _test_prepare_for_training(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) assert not dummy_output.requires_grad # load with `prepare_model_for_kbit_training` model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = prepare_model_for_kbit_training(model) for param in model.parameters(): assert not param.requires_grad config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) assert dummy_output.requires_grad def _test_load_model_low_cpu_mem_usage(self, model_id, config_cls, config_kwargs): # Ensure that low_cpu_mem_usage=True works for from_pretrained and load_adapter and that the resulting model's # parameters are on the correct device. 
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) # note: not using the context manager here because it fails on Windows CI for some reason tmp_dirname = tempfile.mkdtemp() try: model.save_pretrained(tmp_dirname) model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = PeftModel.from_pretrained( model, tmp_dirname, torch_device=self.torch_device, low_cpu_mem_usage=True ) assert {p.device.type for p in model.parameters()} == {self.torch_device} model.load_adapter(tmp_dirname, adapter_name="other", low_cpu_mem_usage=True) assert {p.device.type for p in model.parameters()} == {self.torch_device} finally: try: shutil.rmtree(tmp_dirname) except PermissionError: # windows error pass # also test injecting directly del model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) inject_adapter_in_model(config, model, low_cpu_mem_usage=True) # check that there is no error if not isinstance(config, LNTuningConfig): # LN tuning does not add adapter layers that could be on meta device, it only changes the requires_grad. # Therefore, there is no meta device for LN tuning. assert "meta" in {p.device.type for p in model.parameters()} def _test_save_pretrained(self, model_id, config_cls, config_kwargs, safe_serialization=True): # ensure that the weights are randomly initialized if issubclass(config_cls, LoraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False if issubclass(config_cls, IA3Config): config_kwargs = config_kwargs.copy() config_kwargs["init_ia3_weights"] = False if issubclass(config_cls, VeraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_weights"] = False model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: if safe_serialization: model.save_pretrained(tmp_dirname) else: model.save_pretrained(tmp_dirname, safe_serialization=False) model_from_pretrained = self.transformers_class.from_pretrained(model_id) with warnings.catch_warnings(record=True) as recs: model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # ensure that there is no warning assert not any("Found missing adapter keys" in str(rec.message) for rec in recs) # check if the state dicts are equal if issubclass(config_cls, PromptEncoderConfig): # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load # adapter-specific weights for comparison. # TODO: is this expected? 
state_dict = get_peft_model_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True) else: state_dict = get_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True) # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin" # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) self.check_modelcard(tmp_dirname, model) self.check_config_json(tmp_dirname, model) def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs, safe_serialization=True): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return pytest.skip(f"Test not applicable for {config_cls}") # ensure that the weights are randomly initialized if issubclass(config_cls, LoraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False elif issubclass(config_cls, IA3Config): config_kwargs = config_kwargs.copy() config_kwargs["init_ia3_weights"] = False elif hasattr(config_cls, "init_weights"): config_kwargs["init_weights"] = False model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) new_adapter_config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model.add_adapter("new_adapter", new_adapter_config) with tempfile.TemporaryDirectory() as tmp_dirname: if safe_serialization: model.save_pretrained(tmp_dirname) else: model.save_pretrained(tmp_dirname, safe_serialization=False) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) new_adapter_dir = os.path.join(tmp_dirname, "new_adapter") model_from_pretrained.load_adapter(new_adapter_dir, "new_adapter") # check if the state dicts are equal if issubclass(config_cls, PromptEncoderConfig): # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load # adapter-specific weights for comparison. # TODO: is this expected? 
state_dict = get_peft_model_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True) else: state_dict = get_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin" # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)) assert os.path.exists(os.path.join(new_adapter_dir, target_adapter_filename)) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) assert os.path.exists(os.path.join(new_adapter_dir, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) assert not os.path.exists(os.path.join(new_adapter_dir, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) assert not os.path.exists(os.path.join(new_adapter_dir, "config.json")) self.check_modelcard(tmp_dirname, model) self.check_config_json(tmp_dirname, model) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, selected_adapters=["default"]) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) assert "default" in model_from_pretrained.peft_config.keys() assert "new_adapter" not in model_from_pretrained.peft_config.keys() def _test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls(base_model_name_or_path=model_id, **config_kwargs) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained( model_from_pretrained, tmp_dirname, is_trainable=False, config=config ) assert model_from_pretrained.peft_config["default"].inference_mode assert model_from_pretrained.peft_config["default"] is config def _test_load_multiple_adapters(self, model_id, config_cls, config_kwargs): # just ensure that this works and raises no error model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) del model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = PeftModel.from_pretrained(model, tmp_dirname, torch_device=self.torch_device) load_result1 = model.load_adapter(tmp_dirname, adapter_name="other") load_result2 = model.load_adapter(tmp_dirname, adapter_name="yet-another") # VBLoRA uses a shared "vblora_vector_bank" across all layers, causing it to appear # in the missing keys list, which leads to failed test cases. So # skipping the missing keys check for VBLoRA. 
if config.peft_type != "VBLORA": assert load_result1.missing_keys == [] assert load_result2.missing_keys == [] def _test_merge_layers_fp16(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, IA3Config, AdaLoraConfig, LoHaConfig, LoKrConfig, VBLoRAConfig): # Merge layers only supported for LoRA and IA³ return pytest.skip(f"Test not applicable for {config_cls}") if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") if (self.torch_device in ["cpu"]) and (version.parse(torch.__version__) <= version.parse("2.1")): self.skipTest("PyTorch 2.1 not supported for Half of addmm_impl_cpu_ ") model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.float16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(device=self.torch_device, dtype=torch.float16) model.eval() # This should simply work _ = model.merge_and_unload() def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs): if config_cls not in ( LoraConfig, IA3Config, AdaLoraConfig, LoHaConfig, LoKrConfig, VeraConfig, FourierFTConfig, ): # Merge layers only supported for LoRA and IA³ return if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() # This should work logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() logits_merged = model(**dummy_input)[0] assert torch.allclose(logits_unmerged, logits_merged, atol=1e-3, rtol=1e-3) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) for name, module in model.named_parameters(): if ( "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name or "vera_lambda" in name or "fourierft_spectrum" in name ): module.data[0] = torch.nan with pytest.raises( ValueError, match="NaNs detected in the merged weights. The adapter default seems to be broken" ): model = model.merge_and_unload(safe_merge=True) for name, module in model.named_parameters(): if ( "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name or "vera_lambda" in name or "fourierft_spectrum" in name ): module.data[0] = torch.inf with pytest.raises( ValueError, match="NaNs detected in the merged weights. 
The adapter default seems to be broken" ): model = model.merge_and_unload(safe_merge=True) def _test_merge_layers(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if issubclass(config_cls, (OFTConfig, BOFTConfig)): return pytest.skip(f"Test not applicable for {config_cls}") if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() logits = model(**dummy_input)[0] model.merge_adapter() logits_merged = model(**dummy_input)[0] model.unmerge_adapter() logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() # check that PEFT layers are completely removed assert not any(isinstance(module, BaseTunerLayer) for module in model.modules()) logits_merged_unloaded = model(**dummy_input)[0] conv_ids = ["Conv2d", "Conv3d", "Conv2d2"] atol, rtol = 1e-4, 1e-4 if self.torch_device in ["mlu"]: atol, rtol = 1e-3, 1e-3 # MLU if config.peft_type == "ADALORA": # AdaLoRA is a bit flaky on CI, but this cannot be reproduced locally atol, rtol = 1e-2, 1e-2 if (config.peft_type in {"IA3", "LORA"}) and (model_id in conv_ids): # for some reason, the Conv introduces a larger error atol, rtol = 0.3, 0.01 assert torch.allclose(logits, logits_merged, atol=atol, rtol=rtol) assert torch.allclose(logits, logits_unmerged, atol=atol, rtol=rtol) assert torch.allclose(logits, logits_merged_unloaded, atol=atol, rtol=rtol) # For this test to work, weights should not be initialized to identity transform (e.g. # init_lora_weights should be False). 
transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] assert not torch.allclose(logits_merged, logits_transformers, atol=1e-10, rtol=1e-10) # test that the logits are identical after a save-load-roundtrip if hasattr(model, "save_pretrained"): # model is a transformers model tmp_dirname = tempfile.mkdtemp() # note: not using the context manager here because it fails on Windows CI for some reason try: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(tmp_dirname).to(self.torch_device) finally: try: shutil.rmtree(tmp_dirname) except PermissionError: # windows error pass else: # model is not a transformers model model_from_pretrained = pickle.loads(pickle.dumps(model)) logits_merged_from_pretrained = model_from_pretrained(**dummy_input)[0] assert torch.allclose(logits_merged, logits_merged_from_pretrained, atol=atol, rtol=rtol) def _test_merge_layers_multi(self, model_id, config_cls, config_kwargs): supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.HRA, PeftType.BONE, ] if ("gpt2" in model_id.lower()) and (config_cls == IA3Config): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() with torch.inference_mode(): logits_adapter_1 = model(**dummy_input)[0] model.add_adapter("adapter-2", config) model.set_adapter("adapter-2") model.eval() with torch.inference_mode(): logits_adapter_2 = model(**dummy_input)[0] assert not torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3) model.set_adapter("default") with torch.inference_mode(): logits_adapter_1_after_set = model(**dummy_input)[0] assert torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3) model_copy = copy.deepcopy(model) model_copy_2 = copy.deepcopy(model) model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"]) with torch.inference_mode(): logits_merged_all = model_merged_all(**dummy_input)[0] assert not torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3) assert not torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3) model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"]) with torch.inference_mode(): logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0] assert torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3) model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"]) with torch.inference_mode(): logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0] assert torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3) def _test_merge_layers_is_idempotent(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) model.eval() torch.manual_seed(0) model.merge_adapter() logits_0 = model(**self.prepare_inputs_for_testing())[0] # merging again should not 
change anything # also check warning: with pytest.warns(UserWarning, match="All adapters are already merged, nothing to do"): model.merge_adapter() logits_1 = model(**self.prepare_inputs_for_testing())[0] assert torch.allclose(logits_0, logits_1, atol=1e-6, rtol=1e-6) def _test_safe_merge(self, model_id, config_cls, config_kwargs): torch.manual_seed(0) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = model.to(self.torch_device).eval() inputs = self.prepare_inputs_for_testing() logits_base = model(**inputs)[0] model = get_peft_model(model, config).eval() logits_peft = model(**inputs)[0] atol, rtol = 1e-6, 1e-6 # default # Initializing with LN tuning cannot be configured to change the outputs (unlike init_lora_weights=False) if not issubclass(config_cls, LNTuningConfig): # sanity check that the logits are different assert not torch.allclose(logits_base, logits_peft, atol=atol, rtol=rtol) model_unloaded = model.merge_and_unload(safe_merge=True) logits_unloaded = model_unloaded(**inputs)[0] if self.torch_device in ["mlu"]: atol, rtol = 1e-3, 1e-3 # MLU conv_ids = ["Conv2d", "Conv3d", "Conv2d2"] if issubclass(config_cls, (IA3Config, LoraConfig)) and model_id in conv_ids: # more instability with Conv atol, rtol = 1e-3, 1e-3 # check that the logits are the same after unloading assert torch.allclose(logits_peft, logits_unloaded, atol=atol, rtol=rtol) # Ensure that serializing with safetensors works, there was an error when weights were not contiguous with tempfile.TemporaryDirectory() as tmp_dirname: # serializing with torch.save works torch.save(model_unloaded.state_dict(), os.path.join(tmp_dirname, "model.bin")) # serializing with safetensors works save_file(model_unloaded.state_dict(), os.path.join(tmp_dirname, "model.safetensors")) def _test_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): # Test for mixing different adapters in a single batch by passing the adapter_names argument if config_cls not in (LoraConfig,): return pytest.skip(f"Mixed adapter batches not supported for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) torch.manual_seed(0) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_name="adapter0").eval() model.add_adapter("adapter1", config) model = model.to(self.torch_device).eval() dummy_input = self.prepare_inputs_for_testing() # ensure that we have at least 3 samples for this test dummy_input = {k: torch.cat([v for _ in range(3)]) for k, v in dummy_input.items()} with torch.inference_mode(): with model.disable_adapter(): output_base = model(**dummy_input)[0] logits_base = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] model.set_adapter("adapter0") with torch.inference_mode(): output_adapter0 = model(**dummy_input)[0] logits_adapter0 = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] model.set_adapter("adapter1") with torch.inference_mode(): output_adapter1 = model(**dummy_input)[0] logits_adapter1 = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] atol, rtol = 1e-4, 1e-4 # sanity check that there are enough outputs and that they are different assert len(output_base) == len(output_adapter0) == len(output_adapter1) >= 3 assert len(logits_base) == len(logits_adapter0) == len(logits_adapter1) >= 3 assert not torch.allclose(output_base, output_adapter0, 
atol=atol, rtol=rtol) assert not torch.allclose(output_base, output_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(output_adapter0, output_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(logits_base, logits_adapter0, atol=atol, rtol=rtol) assert not torch.allclose(logits_base, logits_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(logits_adapter0, logits_adapter1, atol=atol, rtol=rtol) # alternate between base model, adapter0, and adapter1 adapters = ["__base__", "adapter0", "adapter1"] dummy_input["adapter_names"] = [adapters[i % 3] for i in (range(len(dummy_input["input_ids"])))] with torch.inference_mode(): output_mixed = model(**dummy_input)[0] logits_mixed = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] assert torch.allclose(output_base[::3], output_mixed[::3], atol=atol, rtol=rtol) assert torch.allclose(output_adapter0[1::3], output_mixed[1::3], atol=atol, rtol=rtol) assert torch.allclose(output_adapter1[2::3], output_mixed[2::3], atol=atol, rtol=rtol) assert torch.allclose(logits_base[::3], logits_mixed[::3], atol=atol, rtol=rtol) assert torch.allclose(logits_adapter0[1::3], logits_mixed[1::3], atol=atol, rtol=rtol) assert torch.allclose(logits_adapter1[2::3], logits_mixed[2::3], atol=atol, rtol=rtol) def _test_generate_with_mixed_adapter_batches_and_beam_search(self, model_id, config_cls, config_kwargs): # Test generating with beam search and with mixing different adapters in a single batch by passing the # adapter_names argument. See #2283. if config_cls not in (LoraConfig,): return pytest.skip(f"Mixed adapter batches not supported for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) torch.manual_seed(0) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_name="adapter0").eval() model.add_adapter("adapter1", config) # In contrast to forward, for generate, it can sometimes happen that we get the same results as the base model # even with LoRA applied because the impact of LoRA is not big enough. Therefore, use this "trick" to make LoRA # stronger. for name, param in model.named_parameters(): if model.base_model.prefix in name: param.data.mul_(10.0) model = model.to(self.torch_device).eval() dummy_input = self.prepare_inputs_for_testing() # ensure that we have at least 3 samples for this test dummy_input = {k: torch.cat([v for _ in range(3)]) for k, v in dummy_input.items()} gen_kwargs = {**dummy_input, "max_length": 20, "num_beams": 10, "early_stopping": True} with torch.inference_mode(): with model.disable_adapter(): gen_base = model.generate(**gen_kwargs) model.set_adapter("adapter0") with torch.inference_mode(): gen_adapter0 = model.generate(**gen_kwargs) model.set_adapter("adapter1") with torch.inference_mode(): gen_adapter1 = model.generate(**gen_kwargs) def remove_padding(seq, pad_value): lst = list(seq) while lst and (lst[-1] == pad_value): lst.pop() return lst def gens_are_same(gen0, gen1): # Special function to compare generations. We cannot use torch.allclose it will raise an error when sequence # lengths differ. Morevoer, we need to remove the padding from the sequences. This is because, even though # normally identical sequences should have the same length, when we do mixed adapter batches, each sample # will be padded to the longest sequence in that mixed batch, which can be different from the longest # sequence without mixed adapter batches. 
pad_value = model.config.eos_token_id for sample0, sample1 in zip(gen0, gen1): sample0 = remove_padding(sample0, pad_value) sample1 = remove_padding(sample1, pad_value) if (len(sample0) != len(sample1)) or (sample0 != sample1): # at least one sample differs, the generations are not identical return False return True # sanity check that there are enough outputs and that they are different assert len(gen_base) == len(gen_adapter0) == len(gen_adapter1) assert len(gen_adapter1) >= 3 assert not gens_are_same(gen_base, gen_adapter0) assert not gens_are_same(gen_base, gen_adapter1) assert not gens_are_same(gen_adapter0, gen_adapter1) # alternate between base model, adapter0, and adapter1 adapters = ["__base__", "adapter0", "adapter1"] gen_kwargs["adapter_names"] = [adapters[i % 3] for i in (range(len(dummy_input["input_ids"])))] with torch.inference_mode(): gen_mixed = model.generate(**gen_kwargs) assert gens_are_same(gen_base[::3], gen_mixed[::3]) assert gens_are_same(gen_adapter0[1::3], gen_mixed[1::3]) assert gens_are_same(gen_adapter1[2::3], gen_mixed[2::3]) def _test_generate(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `generate` works _ = model.generate(**inputs) def _test_generate_pos_args(self, model_id, config_cls, config_kwargs, raises_err: bool): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() if raises_err: with pytest.raises(TypeError): # check if `generate` raises an error if positional arguments are passed _ = model.generate(inputs["input_ids"]) else: # check if `generate` works if positional arguments are passed _ = model.generate(inputs["input_ids"]) def _test_generate_half_prec(self, model_id, config_cls, config_kwargs): if config_cls not in (IA3Config, LoraConfig, PrefixTuningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if self.torch_device == "mps": # BFloat16 is not supported on MPS return pytest.skip("BFloat16 is not supported on MPS") model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask) def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs): if config_cls not in (PrefixTuningConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.half() assert model.base_model_torch_dtype == torch.float16 def _test_training(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()): # TODO: no 
gradients on the "dense" layer, other layers work, not sure why self.skipTest("AdaLora with RoBERTa does not work correctly") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() parameter_prefix = model.prefix for n, param in model.named_parameters(): if (parameter_prefix in n) or ("modules_to_save" in n): assert param.grad is not None else: assert param.grad is None def _test_inference_safetensors(self, model_id, config_cls, config_kwargs): if (config_cls == PrefixTuningConfig) and ("deberta" in model_id.lower()): # TODO: raises an error: # TypeError: DebertaModel.forward() got an unexpected keyword argument 'past_key_values' self.skipTest("DeBERTa with PrefixTuning does not work correctly") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() # set to eval mode, since things like dropout can affect the output otherwise model.eval() logits = model(**inputs)[0][0] with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=True) assert "adapter_model.safetensors" in os.listdir(tmp_dirname) assert "adapter_model.bin" not in os.listdir(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] assert torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4) def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, layers_to_transform=[0], **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() nb_trainable = 0 for n, param in model.named_parameters(): if "lora" in n: assert param.grad is not None nb_trainable += 1 else: assert param.grad is None with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] assert torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) nb_trainable_all = 0 for n, param in model.named_parameters(): if "lora" in n: nb_trainable_all += 1 assert nb_trainable < nb_trainable_all def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs): if 
config_cls == PrefixTuningConfig: return pytest.skip(f"Test not applicable for {config_cls}") if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("AdaLora with RoBERTa does not work correctly") if (config_cls == OFTConfig) and ("deberta" in model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("OFT with Deberta does not work correctly") model = self.transformers_class.from_pretrained(model_id) if not getattr(model, "supports_gradient_checkpointing", False): return pytest.skip(f"Model {model_id} does not support gradient checkpointing") model.gradient_checkpointing_enable() config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() for n, param in model.named_parameters(): if "prompt_encoder." in n: # prompt tuning methods if not issubclass(config_cls, CPTConfig): assert param.grad is not None elif ( "delta_embedding" in n ): # delta_embedding is the embedding that should be updated with grads in CPT assert param.grad is not None elif hasattr(model, "prefix") and (model.prefix in n): # non-prompt tuning methods assert param.grad is not None else: assert param.grad is None def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, VBLoRAConfig): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) _ = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname, device_map={"": "cpu"}).to( self.torch_device ) def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): if not issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() if issubclass(config_cls, CPTConfig): parameters = [] for name, param in model.prompt_encoder.named_parameters(): if name != "default.embedding.weight": parameters.append(param) else: parameters = model.prompt_encoder.parameters() # check that prompt encoder has grads for param in parameters: assert param.grad is not None def _test_delete_adapter(self, model_id, config_cls, config_kwargs): supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.VERA, PeftType.FOURIERFT, PeftType.HRA, PeftType.VBLORA, PeftType.BONE, ] # IA3 does not support deleting adapters yet, but it just needs to be added # AdaLora does not support multiple adapters config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return pytest.skip(f"Test not applicable for 
{config.peft_type}") model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) model.set_adapter(adapter_to_delete) model = model.to(self.torch_device) model.delete_adapter(adapter_to_delete) assert adapter_to_delete not in model.peft_config assert model.active_adapters == ["default"] key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", []) for attr in attributes_to_check: assert adapter_to_delete not in getattr(target, attr) # check that we can also delete the last remaining adapter model.delete_adapter("default") assert "default" not in model.peft_config assert model.active_adapters == [] input = self.prepare_inputs_for_testing() # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter model.base_model(**input) # should not raise an error def _test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs): # same as test_delete_adapter, but this time an inactive adapter is deleted supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.FOURIERFT, PeftType.HRA, PeftType.VBLORA, PeftType.BONE, ] # IA3 does not support deleting adapters yet, but it just needs to be added # AdaLora does not support multiple adapters config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return pytest.skip(f"Test not applicable for {config.peft_type}") model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) # "delete_me" is added but not activated model = model.to(self.torch_device) model.delete_adapter(adapter_to_delete) assert adapter_to_delete not in model.peft_config assert model.active_adapters == ["default"] key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", []) for attr in attributes_to_check: assert adapter_to_delete not in getattr(target, attr) # check that we can also delete the last remaining adapter model.delete_adapter("default") assert "default" not in model.peft_config assert model.active_adapters == [] input = self.prepare_inputs_for_testing() # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter model.base_model(**input) # should not raise an error def _test_unload_adapter(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) num_params_base = len(model.state_dict()) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) if config.peft_type not in ( "LORA", "ADALORA", "IA3", "BOFT", "OFT", "VERA", "FOURIERFT", "HRA", "VBLORA", "BONE", ): with pytest.raises(AttributeError): model = model.unload() else: dummy_input = self.prepare_inputs_for_testing() logits_with_adapter = model(**dummy_input)[0] transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] model.eval() 
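            # Descriptive note: unload() strips the PEFT adapter layers and restores the original base modules
            # without merging the adapter weights, so the unloaded model is expected to match the plain
            # transformers model again (checked against logits_transformers below).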
model = model.unload() logits_unload = model(**dummy_input)[0] num_params_unloaded = len(model.state_dict()) # check that PEFT layers are completely removed assert not any(isinstance(module, BaseTunerLayer) for module in model.modules()) assert not torch.allclose(logits_with_adapter, logits_unload, atol=1e-10, rtol=1e-10) assert torch.allclose(logits_transformers, logits_unload, atol=1e-4, rtol=1e-4) assert num_params_base == num_params_unloaded def _test_weighted_combination_of_adapters_lora(self, model, config, adapter_list, weight_list): model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], replace(config, r=20)) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test svd re-weighting with multiple adapters model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_svd_reweighting") # test ties_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_ties_svd_reweighting", combination_type="ties_svd", density=0.5, ) # test dare_linear_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_linear_svd_reweighting", combination_type="dare_linear_svd", density=0.5, ) # test dare_ties_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_ties_svd_reweighting", combination_type="dare_ties_svd", density=0.5, ) # test magnitude_prune_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_magnitude_prune_svd_reweighting", combination_type="magnitude_prune_svd", density=0.5, ) # test cat re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_cat_reweighting", combination_type="cat" ) # test linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_linear_reweighting", combination_type="linear" ) # test ties re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_ties_reweighting", combination_type="ties", density=0.5 ) # test dare_linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_dare_linear_reweighting", combination_type="dare_linear", density=0.5, ) # test dare_ties re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_dare_ties_reweighting", combination_type="dare_ties", density=0.5, ) # test magnitude_prune re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_magnitude_prune_reweighting", combination_type="magnitude_prune", density=0.5, ) # test linear re-weighting with multiple adapters with only first adapter having non zero weight model.add_weighted_adapter( adapter_list[:2], [weight_list[0], 0], "multi_adapter_linear_reweighting_single_enabled", combination_type="linear", ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_linear_reweighting_uneven_r", combination_type="linear", ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_ties_reweighting_uneven_r", combination_type="ties", density=0.5, 
) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_linear_reweighting_uneven_r", combination_type="dare_linear", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_ties_reweighting_uneven_r", combination_type="dare_ties", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_magnitude_prune_reweighting_uneven_r", combination_type="magnitude_prune", density=0.5, ) new_adapters = [ "single_adapter_reweighting", "multi_adapter_svd_reweighting", "multi_adapter_ties_svd_reweighting", "multi_adapter_dare_linear_svd_reweighting", "multi_adapter_dare_ties_svd_reweighting", "multi_adapter_magnitude_prune_svd_reweighting", "multi_adapter_cat_reweighting", "multi_adapter_linear_reweighting", "multi_adapter_linear_reweighting_single_enabled", "multi_adapter_ties_reweighting", "multi_adapter_dare_linear_reweighting", "multi_adapter_dare_ties_reweighting", "multi_adapter_magnitude_prune_reweighting", ] for new_adapter in new_adapters: assert new_adapter in model.peft_config key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) if isinstance(target, LoraLayer): for adapter_name in new_adapters: if "single" in adapter_name: new_delta_weight = target.get_delta_weight(adapter_name) weighted_original_delta_weights = target.get_delta_weight(adapter_list[0]) * weight_list[0] assert torch.allclose(new_delta_weight, weighted_original_delta_weights, atol=1e-4, rtol=1e-4) elif "svd" in adapter_name: assert target.r[adapter_name] == 20 elif "linear" in adapter_name: assert target.r[adapter_name] == 8 elif "cat" in adapter_name: assert target.r[adapter_name] == 28 dummy_input = self.prepare_inputs_for_testing() model.eval() for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) assert model.active_adapter == adapter_name assert model.active_adapters == [adapter_name] model(**dummy_input)[0] def _test_weighted_combination_of_adapters_ia3(self, model, config, adapter_list, weight_list): model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], config) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test re-weighting with multiple adapters model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_reweighting") new_adapters = [ "single_adapter_reweighting", "multi_adapter_reweighting", ] for new_adapter in new_adapters: assert new_adapter in model.peft_config dummy_input = self.prepare_inputs_for_testing() model.eval() for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) assert model.active_adapter == adapter_name assert model.active_adapters == [adapter_name] model(**dummy_input)[0] def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return pytest.skip(f"Test not applicable for {config_cls}") if model_id.endswith("qwen2"): # Qwen2 fails with weighted adapter combinations using SVD return pytest.skip(f"Test does not work with model {model_id}") adapter_list = ["adapter1", "adapter_2", "adapter_3"] weight_list = [0.5, 1.5, 1.5] # Initialize the 
config config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if not isinstance(config, (LoraConfig, IA3Config)): # This test is only applicable for Lora and IA3 configs return pytest.skip(f"Test not applicable for {config}") model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_list[0]) if isinstance(config, LoraConfig): self._test_weighted_combination_of_adapters_lora(model, config, adapter_list, weight_list) elif isinstance(config, IA3Config): self._test_weighted_combination_of_adapters_ia3(model, config, adapter_list, weight_list) else: pytest.skip(f"Test not applicable for {config}") def _test_disable_adapter(self, model_id, config_cls, config_kwargs): task_type = config_kwargs.get("task_type") if (task_type == "SEQ_2_SEQ_LM") and (config_cls in (PromptTuningConfig, PromptEncoderConfig)): self.skipTest("Seq2Seq + prompt tuning/prompt encoder does not work with disabling adapters") def get_output(model): # helper function that works with different model types torch.manual_seed(0) if hasattr(model, "generate"): # let's check the scores, not the output ids, since the latter can easily be identical even if the # weights are slightly changed output = model.generate(**input, return_dict_in_generate=True, output_scores=True).scores[0] # take element 0, as output is a tuple else: output = model(**input) if hasattr(output, "images"): # for SD import numpy as np img = output.images[0] return torch.from_numpy(np.array(img)) return output # initialize model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) # output from BASE MODEL input = self.prepare_inputs_for_testing() output_before = get_output(model) # output from PEFT MODEL if hasattr(self, "instantiate_sd_peft"): # SD models are instantiated differently peft_model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) else: config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) peft_model = get_peft_model(model, config) output_peft = get_output(peft_model) # first check trivial case is not true that peft does not affect the output; for this to work, init_weight # must be False (if the config supports it) if isinstance(peft_model, StableDiffusionPipeline): # for SD, check that most pixels have different values assert (output_before != output_peft).float().mean() > 0.8 else: assert not torch.allclose(output_before, output_peft) # output with DISABLED ADAPTER if isinstance(peft_model, StableDiffusionPipeline): with peft_model.unet.disable_adapter(): with peft_model.text_encoder.disable_adapter(): output_peft_disabled = get_output(peft_model) # for SD, very rarely, a pixel can differ assert (output_before != output_peft_disabled).float().mean() < 1e-4 else: with peft_model.disable_adapter(): output_peft_disabled = get_output(peft_model) assert torch.allclose(output_before, output_peft_disabled, atol=1e-6, rtol=1e-6) # after leaving the disable_adapter context, the output should be the same as with enabled adapter again # see #1501 output_peft_after_disabled = get_output(peft_model) assert torch.allclose(output_peft, output_peft_after_disabled, atol=1e-6, rtol=1e-6) # TODO: add tests to check if disabling adapters works after calling merge_adapter def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs): # When trying to add multiple adapters with bias in Lora, AdaLora or BOFTConfig, an error should be # raised. Also, the peft model should not be left in a half-initialized state. 
        if not issubclass(config_cls, (LoraConfig, AdaLoraConfig, BOFTConfig)):
            return pytest.skip(f"Test not applicable for {config_cls}")

        config_kwargs = config_kwargs.copy()
        config_kwargs["bias"] = "all"
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config, "adapter0")

        if config_cls == LoraConfig or config_cls == AdaLoraConfig:
            with pytest.raises(ValueError):
                model.add_adapter("adapter1", replace(config, r=20))

        if config_cls == BOFTConfig:
            with pytest.raises(ValueError):
                model.add_adapter("adapter1", replace(config, boft_block_num=1, boft_block_size=0))

        # (superficial) test that the model is not left in a half-initialized state when adding an adapter fails
        assert "adapter1" not in model.peft_config
        assert "adapter1" not in model.base_model.peft_config

    def _test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
        # https://github.com/huggingface/peft/issues/727
        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config, adapter_name="test-adapter").to(self.torch_device)

        dummy_input = self.prepare_inputs_for_testing()
        inputs_embeds = model.get_input_embeddings()(dummy_input["input_ids"])
        # just check that no error is raised
        model.forward(inputs_embeds=inputs_embeds)
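
# Illustrative sketch (not part of the original test suite): a concrete test class would typically mix these
# `_test_*` helpers into a subclass of the common tester defined earlier in this file and parametrize them over
# model ids and configs. The class name, model id, and parametrization below are hypothetical:
#
#     class TestTinyDecoderModels(PeftCommonTester):  # assumed mixin/base name
#         transformers_class = AutoModelForCausalLM
#
#         @pytest.mark.parametrize("config_cls,config_kwargs", [(LoraConfig, {"task_type": "CAUSAL_LM"})])
#         def test_merge_layers(self, config_cls, config_kwargs):
#             self._test_merge_layers("hf-internal-testing/tiny-random-gpt2", config_cls, config_kwargs)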
peft/tests/testing_common.py/0
{ "file_path": "peft/tests/testing_common.py", "repo_id": "peft", "token_count": 34640 }
#!/usr/bin/env python3 """ Model Benchmark Script An inference and train step benchmark script for timm models. Hacked together by Ross Wightman (https://github.com/rwightman) """ import argparse import csv import json import logging import time from collections import OrderedDict from contextlib import suppress from functools import partial import torch import torch.nn as nn import torch.nn.parallel from timm.data import resolve_data_config from timm.layers import set_fast_norm from timm.models import create_model, is_model, list_models from timm.optim import create_optimizer_v2 from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\ reparameterize_model has_apex = False try: from apex import amp has_apex = True except ImportError: pass try: from deepspeed.profiling.flops_profiler import get_model_profile has_deepspeed_profiling = True except ImportError as e: has_deepspeed_profiling = False try: from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis has_fvcore_profiling = True except ImportError as e: FlopCountAnalysis = None has_fvcore_profiling = False try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True _logger = logging.getLogger('validate') parser = argparse.ArgumentParser(description='PyTorch Benchmark') # benchmark specific args parser.add_argument('--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark') parser.add_argument('--bench', default='both', type=str, help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'") parser.add_argument('--detail', action='store_true', default=False, help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') parser.add_argument('--no-retry', action='store_true', default=False, help='Do not decay batch size and retry on error.') parser.add_argument('--results-file', default='', type=str, help='Output csv file for validation results (summary)') parser.add_argument('--results-format', default='csv', type=str, help='Format for results file one of (csv, json) (default: csv).') parser.add_argument('--num-warm-iter', default=10, type=int, help='Number of warmup iterations (default: 10)') parser.add_argument('--num-bench-iter', default=40, type=int, help='Number of benchmark iterations (default: 40)') parser.add_argument('--device', default='cuda', type=str, help="device to run benchmark on") # common inference / train args parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', help='model architecture (default: resnet50)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') parser.add_argument('--use-train-size', action='store_true', default=False, help='Run inference at train size, not test-input-size if it exists.') parser.add_argument('--num-classes', type=int, default=None, help='Number classes in dataset') parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages') parser.add_argument('--amp', action='store_true', default=False, help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.') parser.add_argument('--precision', default='float32', type=str, help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)') parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) parser.add_argument('--torchcompile-mode', type=str, default=None, help="torch.compile mode (default: None).") # codegen (model compilation) options scripting_group = parser.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help="Enable compilation w/ specified backend (default: inductor).") scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help="Enable AOT Autograd optimization.") # train optimizer parameters parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay (default: 0.0001)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. 
One of ("norm", "value", "agc")') # model regularization / loss params that impact model or loss fn parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') def timestamp(sync=False): return time.perf_counter() def cuda_timestamp(sync=False, device=None): if sync: torch.cuda.synchronize(device=device) return time.perf_counter() def count_params(model: nn.Module): return sum([m.numel() for m in model.parameters()]) def resolve_precision(precision: str): assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32') amp_dtype = None # amp disabled model_dtype = torch.float32 data_dtype = torch.float32 if precision == 'amp': amp_dtype = torch.float16 elif precision == 'amp_bfloat16': amp_dtype = torch.bfloat16 elif precision == 'float16': model_dtype = torch.float16 data_dtype = torch.float16 elif precision == 'bfloat16': model_dtype = torch.bfloat16 data_dtype = torch.bfloat16 return amp_dtype, model_dtype, data_dtype def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): _, macs, _ = get_model_profile( model=model, input_shape=(batch_size,) + input_size, # input shape/resolution print_profile=detailed, # prints the model graph with the measured profile attached to each module detailed=detailed, # print the detailed profile warm_up=10, # the number of warm-ups before measuring the time of each module as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k) output_file=None, # path to the output file. If None, the profiler prints to stdout. 
ignore_modules=None) # the list of modules to ignore in the profiling return macs, 0 # no activation count in DS def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): if force_cpu: model = model.to('cpu') device, dtype = next(model.parameters()).device, next(model.parameters()).dtype example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) fca = FlopCountAnalysis(model, example_input) aca = ActivationCountAnalysis(model, example_input) if detailed: fcs = flop_count_str(fca) print(fcs) return fca.total(), aca.total() class BenchmarkRunner: def __init__( self, model_name, detail=False, device='cuda', torchscript=False, torchcompile=None, torchcompile_mode=None, aot_autograd=False, reparam=False, precision='float32', fuser='', num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs ): self.model_name = model_name self.detail = detail self.device = device self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision) self.channels_last = kwargs.pop('channels_last', False) if self.amp_dtype is not None: self.amp_autocast = partial(torch.amp.autocast, device_type=device, dtype=self.amp_dtype) else: self.amp_autocast = suppress if fuser: set_jit_fuser(fuser) self.model = create_model( model_name, num_classes=kwargs.pop('num_classes', None), in_chans=3, global_pool=kwargs.pop('gp', 'fast'), scriptable=torchscript, drop_rate=kwargs.pop('drop', 0.), drop_path_rate=kwargs.pop('drop_path', None), drop_block_rate=kwargs.pop('drop_block', None), **kwargs.pop('model_kwargs', {}), ) if reparam: self.model = reparameterize_model(self.model) self.model.to( device=self.device, dtype=self.model_dtype, memory_format=torch.channels_last if self.channels_last else None, ) self.num_classes = self.model.num_classes self.param_count = count_params(self.model) _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size) self.input_size = data_config['input_size'] self.batch_size = kwargs.pop('batch_size', 256) self.compiled = False if torchscript: self.model = torch.jit.script(self.model) self.compiled = True elif torchcompile: assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.' 
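            # reset any previously cached TorchDynamo state so each benchmarked model is compiled from a clean slate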
torch._dynamo.reset() self.model = torch.compile(self.model, backend=torchcompile, mode=torchcompile_mode) self.compiled = True elif aot_autograd: assert has_functorch, "functorch is needed for --aot-autograd" self.model = memory_efficient_fusion(self.model) self.compiled = True self.example_inputs = None self.num_warm_iter = num_warm_iter self.num_bench_iter = num_bench_iter self.log_freq = num_bench_iter // 5 if 'cuda' in self.device: self.time_fn = partial(cuda_timestamp, device=self.device) else: self.time_fn = timestamp def _init_input(self): self.example_inputs = torch.randn( (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) if self.channels_last: self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) class InferenceBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.eval() def run(self): def _step(): t_step_start = self.time_fn() with self.amp_autocast(): output = self.model(self.example_inputs) t_step_end = self.time_fn(True) return t_step_end - t_step_start _logger.info( f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') with torch.no_grad(): self._init_input() for _ in range(self.num_warm_iter): _step() total_step = 0. num_samples = 0 t_run_start = self.time_fn() for i in range(self.num_bench_iter): delta_fwd = _step() total_step += delta_fwd num_samples += self.batch_size num_steps = i + 1 if num_steps % self.log_freq == 0: _logger.info( f"Infer [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_end = self.time_fn(True) t_run_elapsed = t_run_end - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) retries = 0 if self.compiled else 2 # skip profiling if model is scripted while retries: retries -= 1 try: if has_deepspeed_profiling: macs, _ = profile_deepspeed(self.model, self.input_size) results['gmacs'] = round(macs / 1e9, 2) elif has_fvcore_profiling: macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries) results['gmacs'] = round(macs / 1e9, 2) results['macts'] = round(activations / 1e6, 2) except RuntimeError as e: pass _logger.info( f"Inference benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") return results class TrainBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.train() self.loss = nn.CrossEntropyLoss().to(self.device) self.target_shape = tuple() self.optimizer = create_optimizer_v2( self.model, opt=kwargs.pop('opt', 'sgd'), lr=kwargs.pop('lr', 1e-4)) if kwargs.pop('grad_checkpointing', False): self.model.set_grad_checkpointing() def _gen_target(self, batch_size): return torch.empty( (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) def run(self): def _step(detail=False): self.optimizer.zero_grad() # can this be ignored? 
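            # when detail=True, the forward, backward, and optimizer phases below are timed separately;
            # otherwise only the total step time is recorded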
t_start = self.time_fn() t_fwd_end = t_start t_bwd_end = t_start with self.amp_autocast(): output = self.model(self.example_inputs) if isinstance(output, tuple): output = output[0] if detail: t_fwd_end = self.time_fn(True) target = self._gen_target(output.shape[0]) self.loss(output, target).backward() if detail: t_bwd_end = self.time_fn(True) self.optimizer.step() t_end = self.time_fn(True) if detail: delta_fwd = t_fwd_end - t_start delta_bwd = t_bwd_end - t_fwd_end delta_opt = t_end - t_bwd_end return delta_fwd, delta_bwd, delta_opt else: delta_step = t_end - t_start return delta_step _logger.info( f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') self._init_input() for _ in range(self.num_warm_iter): _step() t_run_start = self.time_fn() if self.detail: total_fwd = 0. total_bwd = 0. total_opt = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_fwd, delta_bwd, delta_opt = _step(True) num_samples += self.batch_size total_fwd += delta_fwd total_bwd += delta_bwd total_opt += delta_opt num_steps = (i + 1) if num_steps % self.log_freq == 0: total_step = total_fwd + total_bwd + total_opt _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," f" {1000 * total_opt / num_steps:0.3f} ms/step opt." ) total_step = total_fwd + total_bwd + total_opt t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), opt_time=round(1000 * total_opt / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) else: total_step = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_step = _step(False) num_samples += self.batch_size total_step += delta_step num_steps = (i + 1) if num_steps % self.log_freq == 0: _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Train benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") return results class ProfileRunner(BenchmarkRunner): def __init__(self, model_name, device='cuda', profiler='', **kwargs): super().__init__(model_name=model_name, device=device, **kwargs) if not profiler: if has_deepspeed_profiling: profiler = 'deepspeed' elif has_fvcore_profiling: profiler = 'fvcore' assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work." 
self.profiler = profiler self.model.eval() def run(self): _logger.info( f'Running profiler on {self.model_name} w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') macs = 0 activations = 0 if self.profiler == 'deepspeed': macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True) elif self.profiler == 'fvcore': macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True) results = dict( gmacs=round(macs / 1e9, 2), macts=round(activations / 1e6, 2), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Profile of {self.model_name} done. " f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.") return results def _try_run( model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False ): batch_size = initial_batch_size results = dict() error_str = 'Unknown' while batch_size: try: torch.cuda.empty_cache() bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) results = bench.run() return results except RuntimeError as e: error_str = str(e) _logger.error(f'"{error_str}" while running benchmark.') if not check_batch_size_retry(error_str): _logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.') break if no_batch_size_retry: break batch_size = decay_batch_step(batch_size) _logger.warning(f'Reducing batch size to {batch_size} for retry.') results['error'] = error_str return results def benchmark(args): if args.amp: _logger.warning("Overriding precision to 'amp' since --amp flag set.") args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype]) _logger.info(f'Benchmarking in {args.precision} precision. ' f'{"NHWC" if args.channels_last else "NCHW"} layout. 
' f'torchscript {"enabled" if args.torchscript else "disabled"}') bench_kwargs = vars(args).copy() bench_kwargs.pop('amp') model = bench_kwargs.pop('model') batch_size = bench_kwargs.pop('batch_size') bench_fns = (InferenceBenchmarkRunner,) prefixes = ('infer',) if args.bench == 'both': bench_fns = ( InferenceBenchmarkRunner, TrainBenchmarkRunner ) prefixes = ('infer', 'train') elif args.bench == 'train': bench_fns = TrainBenchmarkRunner, prefixes = 'train', elif args.bench.startswith('profile'): # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore if 'deepspeed' in args.bench: assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter" bench_kwargs['profiler'] = 'deepspeed' elif 'fvcore' in args.bench: assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter" bench_kwargs['profiler'] = 'fvcore' bench_fns = ProfileRunner, batch_size = 1 model_results = OrderedDict(model=model) for prefix, bench_fn in zip(prefixes, bench_fns): run_results = _try_run( model, bench_fn, bench_kwargs=bench_kwargs, initial_batch_size=batch_size, no_batch_size_retry=args.no_retry, ) if prefix and 'error' not in run_results: run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} model_results.update(run_results) if 'error' in run_results: break if 'error' not in model_results: param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) model_results.setdefault('param_count', param_count) model_results.pop('train_param_count', 0) return model_results def main(): setup_default_logging() args = parser.parse_args() model_cfgs = [] model_names = [] if args.fast_norm: set_fast_norm() if args.model_list: args.model = '' with open(args.model_list) as f: model_names = [line.rstrip() for line in f] model_cfgs = [(n, None) for n in model_names] elif args.model == 'all': # validate all models in a list of names with pretrained checkpoints args.pretrained = True model_names = list_models(pretrained=True, exclude_filters=['*in21k']) model_cfgs = [(n, None) for n in model_names] elif not is_model(args.model): # model name doesn't exist, try as wildcard filter model_names = list_models(args.model) model_cfgs = [(n, None) for n in model_names] if len(model_cfgs): _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) results = [] try: for m, _ in model_cfgs: if not m: continue args.model = m r = benchmark(args) if r: results.append(r) time.sleep(10) except KeyboardInterrupt as e: pass sort_key = 'infer_samples_per_sec' if 'train' in args.bench: sort_key = 'train_samples_per_sec' elif 'profile' in args.bench: sort_key = 'infer_gmacs' results = filter(lambda x: sort_key in x, results) results = sorted(results, key=lambda x: x[sort_key], reverse=True) else: results = benchmark(args) if args.results_file: write_results(args.results_file, results, format=args.results_format) # output results in JSON to stdout w/ delimiter for runner script print(f'--result\n{json.dumps(results, indent=4)}') def write_results(results_file, results, format='csv'): with open(results_file, mode='w') as cf: if format == 'json': json.dump(results, cf, indent=4) else: if not isinstance(results, (list, tuple)): results = [results] if not results: return dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == '__main__': main()
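# Example invocations (illustrative; flag names are inferred from the `args.*` attributes used above,
# so verify them against the argparse definitions near the top of this file):
#   python benchmark.py --model resnet50 --bench infer --amp
#   python benchmark.py --model resnet50 --bench train --batch-size 128 --results-file bench.csv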
pytorch-image-models/benchmark.py/0
{ "file_path": "pytorch-image-models/benchmark.py", "repo_id": "pytorch-image-models", "token_count": 13296 }
# Big Transfer (BiT) **Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The finetuned models contained in this collection are finetuned on ImageNet. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
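If you prefer to write the training loop yourself rather than adapt the script, a minimal fine-tuning sketch is shown below. It is illustrative only: `train_loader`, the optimizer choice and the learning rate are placeholders for your own data pipeline and hyper-parameters, not the BiT-HyperRule settings.

```py
>>> import torch
>>> import timm
>>> from timm.optim import create_optimizer_v2
>>> model = timm.create_model('resnetv2_50x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
>>> optimizer = create_optimizer_v2(model, opt='sgd', lr=0.003, momentum=0.9)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> model.train()
>>> for images, targets in train_loader:  # train_loader is your own DataLoader
...     optimizer.zero_grad()
...     loss = criterion(model(images), targets)
...     loss.backward()
...     optimizer.step()
```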
## Citation ```BibTeX @misc{kolesnikov2020big, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, year={2020}, eprint={1912.11370}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Big Transfer Paper: Title: 'Big Transfer (BiT): General Visual Representation Learning' URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual Models: - Name: resnetv2_101x1_bitm In Collection: Big Transfer Metadata: FLOPs: 5330896 Parameters: 44540000 File Size: 178256468 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_101x1_bitm LR: 0.03 Epochs: 90 Layers: 101 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444 Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.21% Top 5 Accuracy: 96.47% - Name: resnetv2_101x3_bitm In Collection: Big Transfer Metadata: FLOPs: 15988688 Parameters: 387930000 File Size: 1551830100 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_101x3_bitm LR: 0.03 Epochs: 90 Layers: 101 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451 Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.38% Top 5 Accuracy: 97.37% - Name: resnetv2_152x2_bitm In Collection: Big Transfer Metadata: FLOPs: 10659792 Parameters: 236340000 File Size: 945476668 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M ID: resnetv2_152x2_bitm Crop Pct: '1.0' Image Size: '480' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458 Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.4% Top 5 Accuracy: 97.43% - Name: resnetv2_152x4_bitm In Collection: Big Transfer Metadata: FLOPs: 21317584 Parameters: 936530000 File Size: 3746270104 Architecture: 
- 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_152x4_bitm Crop Pct: '1.0' Image Size: '480' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465 Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.95% Top 5 Accuracy: 97.45% - Name: resnetv2_50x1_bitm In Collection: Big Transfer Metadata: FLOPs: 5330896 Parameters: 25550000 File Size: 102242668 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_50x1_bitm LR: 0.03 Epochs: 90 Layers: 50 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430 Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.19% Top 5 Accuracy: 95.63% - Name: resnetv2_50x3_bitm In Collection: Big Transfer Metadata: FLOPs: 15988688 Parameters: 217320000 File Size: 869321580 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_50x3_bitm LR: 0.03 Epochs: 90 Layers: 50 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437 Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.75% Top 5 Accuracy: 97.12% -->
pytorch-image-models/hfdocs/source/models/big-transfer.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/big-transfer.mdx", "repo_id": "pytorch-image-models", "token_count": 4103 }
# Noisy Student (EfficientNet) **Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps: 1. train a teacher model on labeled images 2. use the teacher to generate pseudo labels on unlabeled images 3. train a student model on the combination of labeled images and pseudo labeled images. The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student. Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
```py >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{xie2020selftraining, title={Self-training with Noisy Student improves ImageNet classification}, author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le}, year={2020}, eprint={1911.04252}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: Noisy Student Paper: Title: Self-training with Noisy Student improves ImageNet classification URL: https://paperswithcode.com/paper/self-training-with-noisy-student-improves Models: - Name: tf_efficientnet_b0_ns In Collection: Noisy Student Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21386709 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b0_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1427 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.66% Top 5 Accuracy: 94.37% - Name: tf_efficientnet_b1_ns In Collection: Noisy Student Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31516408 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b1_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1437 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.39% Top 5 Accuracy: 95.74% - Name: tf_efficientnet_b2_ns In Collection: Noisy Student Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36801803 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - 
Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b2_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1447 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.39% Top 5 Accuracy: 96.24% - Name: tf_efficientnet_b3_ns In Collection: Noisy Student Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49385734 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b3_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1457 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.04% Top 5 Accuracy: 96.91% - Name: tf_efficientnet_b4_ns In Collection: Noisy Student Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77995057 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b4_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1467 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.15% Top 5 Accuracy: 97.47% - Name: tf_efficientnet_b5_ns In Collection: Noisy Student Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122404944 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish 
Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b5_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1477 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.08% Top 5 Accuracy: 97.75% - Name: tf_efficientnet_b6_ns In Collection: Noisy Student Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173239537 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b6_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1487 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.45% Top 5 Accuracy: 97.88% - Name: tf_efficientnet_b7_ns In Collection: Noisy Student Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266853140 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b7_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1498 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.83% Top 5 Accuracy: 98.08% - Name: tf_efficientnet_l2_ns In Collection: Noisy Student Metadata: FLOPs: 611646113804 Parameters: 480310000 File Size: 1925950424 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification 
Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod Training Time: 6 days ID: tf_efficientnet_l2_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.96' Momentum: 0.9 Batch Size: 2048 Image Size: '800' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1520 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 88.35% Top 5 Accuracy: 98.66% -->
pytorch-image-models/hfdocs/source/models/noisy-student.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/noisy-student.mdx", "repo_id": "pytorch-image-models", "token_count": 6685 }
# SPNASNet **Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('spnasnet_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{stamoulis2019singlepath, title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours}, author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu}, year={2019}, eprint={1904.02877}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: SPNASNet Paper: Title: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours' URL: https://paperswithcode.com/paper/single-path-nas-designing-hardware-efficient Models: - Name: spnasnet_100 In Collection: SPNASNet Metadata: FLOPs: 442385600 Parameters: 4420000 File Size: 17902337 Architecture: - Average Pooling - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - ReLU Tasks: - Image Classification Training Data: - ImageNet ID: spnasnet_100 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L995 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.08% Top 5 Accuracy: 91.82% -->
pytorch-image-models/hfdocs/source/models/spnasnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/spnasnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1510 }
# Optimization

This page contains the API reference documentation for the optimizers included in `timm`.

## Optimizers

### Factory functions

[[autodoc]] timm.optim.create_optimizer_v2
[[autodoc]] timm.optim.list_optimizers
[[autodoc]] timm.optim.get_optimizer_class

### Optimizer Classes

[[autodoc]] timm.optim.adabelief.AdaBelief
[[autodoc]] timm.optim.adafactor.Adafactor
[[autodoc]] timm.optim.adafactor_bv.AdafactorBigVision
[[autodoc]] timm.optim.adahessian.Adahessian
[[autodoc]] timm.optim.adamp.AdamP
[[autodoc]] timm.optim.adan.Adan
[[autodoc]] timm.optim.adopt.Adopt
[[autodoc]] timm.optim.lamb.Lamb
[[autodoc]] timm.optim.laprop.LaProp
[[autodoc]] timm.optim.lars.Lars
[[autodoc]] timm.optim.lion.Lion
[[autodoc]] timm.optim.lookahead.Lookahead
[[autodoc]] timm.optim.madgrad.MADGRAD
[[autodoc]] timm.optim.mars.Mars
[[autodoc]] timm.optim.nadamw.NAdamW
[[autodoc]] timm.optim.nvnovograd.NvNovoGrad
[[autodoc]] timm.optim.rmsprop_tf.RMSpropTF
[[autodoc]] timm.optim.sgdp.SGDP
[[autodoc]] timm.optim.sgdw.SGDW
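As a quick, illustrative sketch of the factory API above (the model and hyper-parameter values are arbitrary placeholders, not recommendations):

```py
>>> import timm
>>> from timm.optim import create_optimizer_v2, list_optimizers
>>> model = timm.create_model('resnet50')
>>> optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
>>> list_optimizers()[:5]  # optimizer names accepted by the `opt` argument
```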
pytorch-image-models/hfdocs/source/reference/optimizers.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/reference/optimizers.mdx", "repo_id": "pytorch-image-models", "token_count": 425 }
import os from typing import Optional from .reader_image_folder import ReaderImageFolder from .reader_image_in_tar import ReaderImageInTar def create_reader( name: str, root: Optional[str] = None, split: str = 'train', **kwargs, ): kwargs = {k: v for k, v in kwargs.items() if v is not None} name = name.lower() name = name.split('/', 1) prefix = '' if len(name) > 1: prefix = name[0] name = name[-1] # FIXME improve the selection right now just tfds prefix or fallback path, will need options to # explicitly select other options shortly if prefix == 'hfds': from .reader_hfds import ReaderHfds # defer Hf datasets import reader = ReaderHfds(name=name, root=root, split=split, **kwargs) elif prefix == 'hfids': from .reader_hfids import ReaderHfids # defer HF datasets import reader = ReaderHfids(name=name, root=root, split=split, **kwargs) elif prefix == 'tfds': from .reader_tfds import ReaderTfds # defer tensorflow import reader = ReaderTfds(name=name, root=root, split=split, **kwargs) elif prefix == 'wds': from .reader_wds import ReaderWds kwargs.pop('download', False) reader = ReaderWds(root=root, name=name, split=split, **kwargs) else: assert os.path.exists(root) # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder # FIXME support split here or in reader? if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar': reader = ReaderImageInTar(root, **kwargs) else: reader = ReaderImageFolder(root, **kwargs) return reader
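# Example usage (illustrative; the dataset names and paths below are hypothetical placeholders):
#   reader = create_reader('hfds/imagenet-1k', split='train')                 # Hugging Face datasets
#   reader = create_reader('tfds/cifar10', root='/data/tensorflow_datasets')  # TensorFlow datasets
#   reader = create_reader('', root='/data/imagenet/train')                   # image folder (or .tar) fallback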
pytorch-image-models/timm/data/readers/reader_factory.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_factory.py", "repo_id": "pytorch-image-models", "token_count": 694 }
""" PyTorch selectable adaptive pooling Adaptive pooling with the ability to select the type of pooling from: * 'avg' - Average pooling * 'max' - Max pooling * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim Both a functional and a nn.Module version of the pooling is provided. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from .format import get_spatial_dim, get_channel_dim _int_tuple_2_t = Union[int, Tuple[int, int]] def adaptive_pool_feat_mult(pool_type='avg'): if pool_type.endswith('catavgmax'): return 2 else: return 1 def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return 0.5 * (x_avg + x_max) def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return torch.cat((x_avg, x_max), 1) def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1): """Selectable global pooling function with dynamic input kernel size """ if pool_type == 'avg': x = F.adaptive_avg_pool2d(x, output_size) elif pool_type == 'avgmax': x = adaptive_avgmax_pool2d(x, output_size) elif pool_type == 'catavgmax': x = adaptive_catavgmax_pool2d(x, output_size) elif pool_type == 'max': x = F.adaptive_max_pool2d(x, output_size) else: assert False, 'Invalid pool type: %s' % pool_type return x class FastAdaptiveAvgPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: F = 'NCHW'): super(FastAdaptiveAvgPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.mean(self.dim, keepdim=not self.flatten) class FastAdaptiveMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.amax(self.dim, keepdim=not self.flatten) class FastAdaptiveAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveAvgMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim, keepdim=not self.flatten) x_max = x.amax(self.dim, keepdim=not self.flatten) return 0.5 * x_avg + 0.5 * x_max class FastAdaptiveCatAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveCatAvgMaxPool, self).__init__() self.flatten = flatten self.dim_reduce = get_spatial_dim(input_fmt) if flatten: self.dim_cat = 1 else: self.dim_cat = get_channel_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten) x_max = x.amax(self.dim_reduce, keepdim=not self.flatten) return torch.cat((x_avg, x_max), self.dim_cat) class AdaptiveAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_avgmax_pool2d(x, self.output_size) class AdaptiveCatAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveCatAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_catavgmax_pool2d(x, self.output_size) 
class SelectAdaptivePool2d(nn.Module): """Selectable global pooling layer with dynamic input kernel size """ def __init__( self, output_size: _int_tuple_2_t = 1, pool_type: str = 'fast', flatten: bool = False, input_fmt: str = 'NCHW', ): super(SelectAdaptivePool2d, self).__init__() assert input_fmt in ('NCHW', 'NHWC') self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing pool_type = pool_type.lower() if not pool_type: self.pool = nn.Identity() # pass through self.flatten = nn.Flatten(1) if flatten else nn.Identity() elif pool_type.startswith('fast') or input_fmt != 'NCHW': assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.' if pool_type.endswith('catavgmax'): self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('avgmax'): self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('max'): self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt) elif pool_type == 'fast' or pool_type.endswith('avg'): self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt) else: assert False, 'Invalid pool type: %s' % pool_type self.flatten = nn.Identity() else: assert input_fmt == 'NCHW' if pool_type == 'avgmax': self.pool = AdaptiveAvgMaxPool2d(output_size) elif pool_type == 'catavgmax': self.pool = AdaptiveCatAvgMaxPool2d(output_size) elif pool_type == 'max': self.pool = nn.AdaptiveMaxPool2d(output_size) elif pool_type == 'avg': self.pool = nn.AdaptiveAvgPool2d(output_size) else: assert False, 'Invalid pool type: %s' % pool_type self.flatten = nn.Flatten(1) if flatten else nn.Identity() def is_identity(self): return not self.pool_type def forward(self, x): x = self.pool(x) x = self.flatten(x) return x def feat_mult(self): return adaptive_pool_feat_mult(self.pool_type) def __repr__(self): return self.__class__.__name__ + '(' \ + 'pool_type=' + self.pool_type \ + ', flatten=' + str(self.flatten) + ')'
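# Example (illustrative): 'catavgmax' concatenates avg- and max-pooled features, doubling the
# channel dim, so downstream layers should scale their input features by feat_mult().
#   pool = SelectAdaptivePool2d(pool_type='catavgmax', flatten=True)
#   out = pool(torch.randn(2, 512, 7, 7))   # -> shape (2, 1024)
#   pool.feat_mult()                        # -> 2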
pytorch-image-models/timm/layers/adaptive_avgmax_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/adaptive_avgmax_pool.py", "repo_id": "pytorch-image-models", "token_count": 3039 }
""" NormAct (Normalization + Activation Layer) Factory Create norm + act combo modules that attempt to be backwards compatible with separate norm + act instances in models. Where these are used it will be possible to swap separate BN + act layers with combined modules like IABN or EvoNorms. Hacked together by / Copyright 2020 Ross Wightman """ import types import functools from .evo_norm import * from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d from .inplace_abn import InplaceAbn _NORM_ACT_MAP = dict( batchnorm=BatchNormAct2d, batchnorm2d=BatchNormAct2d, groupnorm=GroupNormAct, groupnorm1=functools.partial(GroupNormAct, num_groups=1), layernorm=LayerNormAct, layernorm2d=LayerNormAct2d, evonormb0=EvoNorm2dB0, evonormb1=EvoNorm2dB1, evonormb2=EvoNorm2dB2, evonorms0=EvoNorm2dS0, evonorms0a=EvoNorm2dS0a, evonorms1=EvoNorm2dS1, evonorms1a=EvoNorm2dS1a, evonorms2=EvoNorm2dS2, evonorms2a=EvoNorm2dS2a, frn=FilterResponseNormAct2d, frntlu=FilterResponseNormTlu2d, inplaceabn=InplaceAbn, iabn=InplaceAbn, ) _NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} # has act_layer arg to define act type _NORM_ACT_REQUIRES_ARG = { BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): layer = get_norm_act_layer(layer_name, act_layer=act_layer) layer_instance = layer(num_features, apply_act=apply_act, **kwargs) if jit: layer_instance = torch.jit.script(layer_instance) return layer_instance def get_norm_act_layer(norm_layer, act_layer=None): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) norm_act_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_act_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '').lower().split('-')[0] norm_act_layer = _NORM_ACT_MAP[layer_name] elif norm_layer in _NORM_ACT_TYPES: norm_act_layer = norm_layer elif isinstance(norm_layer, types.FunctionType): # if function type, must be a lambda/fn that creates a norm_act layer norm_act_layer = norm_layer else: type_name = norm_layer.__name__.lower() if type_name.startswith('batchnorm'): norm_act_layer = BatchNormAct2d elif type_name.startswith('groupnorm'): norm_act_layer = GroupNormAct elif type_name.startswith('groupnorm1'): norm_act_layer = functools.partial(GroupNormAct, num_groups=1) elif type_name.startswith('layernorm2d'): norm_act_layer = LayerNormAct2d elif type_name.startswith('layernorm'): norm_act_layer = LayerNormAct else: assert False, f"No equivalent norm_act layer for {type_name}" if norm_act_layer in _NORM_ACT_REQUIRES_ARG: # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types norm_act_kwargs.setdefault('act_layer', act_layer) if norm_act_kwargs: norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args return norm_act_layer
pytorch-image-models/timm/layers/create_norm_act.py/0
{ "file_path": "pytorch-image-models/timm/layers/create_norm_act.py", "repo_id": "pytorch-image-models", "token_count": 1591 }
""" Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 @misc{2102.08602, Author = {Irwan Bello}, Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, Year = {2021}, } Status: This impl is a WIP. Code snippets in the paper were used as reference but good chance some details are missing/wrong. I've only implemented local lambda conv based pos embeddings. For a PyTorch impl that includes other embedding options checkout https://github.com/lucidrains/lambda-networks Hacked together by / Copyright 2021 Ross Wightman """ import torch from torch import nn import torch.nn.functional as F from .grid import ndgrid from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ def rel_pos_indices(size): size = to_2tuple(size) pos = torch.stack(ndgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) rel_pos = pos[:, None, :] - pos[:, :, None] rel_pos[0] += size[0] - 1 rel_pos[1] += size[1] - 1 return rel_pos # 2, H * W, H * W class LambdaLayer(nn.Module): """Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. The internal dimensions of the lambda module are controlled via the interaction of several arguments. * the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query (q) and key (k) dimension are determined by * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None * q = num_heads * dim_head, k = dim_head * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W stride (int): output stride of the module, avg pool used if stride == 2 num_heads (int): parallel attention heads. dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) qkv_bias (bool): add bias to q, k, and v projections """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, qk_ratio=1.0, qkv_bias=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0, ' should be divided by num_heads' self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.num_heads = num_heads self.dim_v = dim_out // num_heads self.qkv = nn.Conv2d( dim, num_heads * self.dim_qk + self.dim_qk + self.dim_v, kernel_size=1, bias=qkv_bias) self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) self.norm_v = nn.BatchNorm2d(self.dim_v) if r is not None: # local lambda convolution for pos self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) self.pos_emb = None self.rel_pos_indices = None else: # relative pos embedding assert feat_size is not None feat_size = to_2tuple(feat_size) rel_size = [2 * s - 1 for s in feat_size] self.conv_lambda = None self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in if self.conv_lambda is not None: trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) if self.pos_emb is not None: trunc_normal_(self.pos_emb, std=.02) def forward(self, x): B, C, H, W = x.shape M = H * W qkv = self.qkv(x) q, k, v = torch.split(qkv, [ self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M content_lam = k @ v # B, K, V content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V if self.pos_emb is None: position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V else: # FIXME relative pos embedding path not fully verified pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W out = self.pool(out) return out
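# Example (illustrative): a stride-1 lambda layer using the local (r=9) position lambdas.
#   layer = LambdaLayer(dim=128, num_heads=4, r=9)
#   out = layer(torch.randn(2, 128, 14, 14))   # -> (2, 128, 14, 14)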
pytorch-image-models/timm/layers/lambda_layer.py/0
{ "file_path": "pytorch-image-models/timm/layers/lambda_layer.py", "repo_id": "pytorch-image-models", "token_count": 2611 }
""" Sin-cos, fourier, rotary position embedding modules and functions Hacked together by / Copyright 2022 Ross Wightman """ import math from typing import List, Tuple, Optional, Union import torch from torch import nn as nn from .grid import ndgrid from .trace_utils import _assert def pixel_freq_bands( num_bands: int, max_freq: float = 224., linear_bands: bool = True, device: Optional[torch.device] = None, ): if linear_bands: bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=torch.float32, device=device) else: bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=torch.float32, device=device) return bands * torch.pi def freq_bands( num_bands: int, temperature: float = 10000., step: int = 2, device: Optional[torch.device] = None, ) -> torch.Tensor: exp = torch.arange(0, num_bands, step, dtype=torch.int64, device=device).to(torch.float32) / num_bands bands = 1. / (temperature ** exp) return bands def build_sincos2d_pos_embed( feat_shape: List[int], dim: int = 64, temperature: float = 10000., reverse_coord: bool = False, interleave_sin_cos: bool = False, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None ) -> torch.Tensor: """ Args: feat_shape: dim: temperature: reverse_coord: stack grid order W, H instead of H, W interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos dtype: device: Returns: """ assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding' pos_dim = dim // 4 bands = freq_bands(pos_dim, temperature=temperature, step=1, device=device) if reverse_coord: feat_shape = feat_shape[::-1] # stack W, H instead of H, W grid = torch.stack(ndgrid([ torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape ])).flatten(1).transpose(0, 1) pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0) # FIXME add support for unflattened spatial dim? stack_dim = 2 if interleave_sin_cos else 1 # stack sin, cos, sin, cos instead of sin sin cos cos pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1) return pos_emb.to(dtype=dtype) def build_fourier_pos_embed( feat_shape: List[int], bands: Optional[torch.Tensor] = None, num_bands: int = 64, max_res: int = 224, temperature: float = 10000., linear_bands: bool = False, include_grid: bool = False, in_pixels: bool = True, ref_feat_shape: Optional[List[int]] = None, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ) -> List[torch.Tensor]: """ Args: feat_shape: Feature shape for embedding. bands: Pre-calculated frequency bands. num_bands: Number of frequency bands (determines output dim). max_res: Maximum resolution for pixel based freq. temperature: Temperature for non-pixel freq. linear_bands: Linear band spacing for pixel based freq. include_grid: Include the spatial grid in output. in_pixels: Output in pixel freq. ref_feat_shape: Reference feature shape for resize / fine-tune. dtype: Output dtype. device: Output device. 
Returns: """ if bands is None: if in_pixels: bands = pixel_freq_bands( num_bands, float(max_res), linear_bands=linear_bands, device=device, ) else: bands = freq_bands( num_bands, temperature=temperature, step=1, device=device, ) else: if device is None: device = bands.device if dtype is None: dtype = bands.dtype if in_pixels: t = [torch.linspace(-1., 1., steps=s, device=device, dtype=torch.float32) for s in feat_shape] else: t = [torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape] if ref_feat_shape is not None: # eva's scheme for resizing rope embeddings (ref shape = pretrain) t = [x / f * r for x, f, r in zip(t, feat_shape, ref_feat_shape)] grid = torch.stack(ndgrid(t), dim=-1) grid = grid.unsqueeze(-1) pos = grid * bands pos_sin, pos_cos = pos.sin().to(dtype=dtype), pos.cos().to(dtype) out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos] return out class FourierEmbed(nn.Module): def __init__( self, max_res: int = 224, num_bands: int = 64, concat_grid=True, keep_spatial=False, ): super().__init__() self.max_res = max_res self.num_bands = num_bands self.concat_grid = concat_grid self.keep_spatial = keep_spatial self.register_buffer( 'bands', pixel_freq_bands(max_res, num_bands), persistent=False, ) def forward(self, x): B, C = x.shape[:2] feat_shape = x.shape[2:] emb = build_fourier_pos_embed( feat_shape, self.bands, include_grid=self.concat_grid, dtype=x.dtype, device=x.device, ) emb = torch.cat(emb, dim=-1) emb = emb.transpose(-1, -2).flatten(len(feat_shape)) batch_expand = (B,) + (-1,) * (x.ndim - 1) # FIXME support nD if self.keep_spatial: x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1) else: x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1) x = x.reshape(B, feat_shape.numel(), -1) return x def rot(x): return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): if isinstance(x, torch.Tensor): x = [x] return [t * cos_emb + rot(t) * sin_emb for t in x] def apply_rot_embed_cat(x: torch.Tensor, emb): sin_emb, cos_emb = emb.tensor_split(2, -1) if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_keep_indices_nlc(x, pos_embed, keep_indices): pos_embed = pos_embed.unsqueeze(0).expand(x.shape[0], -1, -1) pos_embed = pos_embed.gather(1, keep_indices.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1])) return pos_embed def build_rotary_pos_embed( feat_shape: List[int], bands: Optional[torch.Tensor] = None, dim: int = 64, max_res: int = 224, temperature: float = 10000., linear_bands: bool = False, in_pixels: bool = True, ref_feat_shape: Optional[List[int]] = None, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ): """ Args: feat_shape: Spatial shape of the target tensor for embedding. bands: Optional pre-generated frequency bands dim: Output dimension of embedding tensor. max_res: Maximum resolution for pixel mode. temperature: Temperature (inv freq) for non-pixel mode linear_bands: Linearly (instead of log) spaced bands for pixel mode in_pixels: Pixel vs language (inv freq) mode. dtype: Output dtype. device: Output device. 
Returns: """ sin_emb, cos_emb = build_fourier_pos_embed( feat_shape, bands=bands, num_bands=dim // 4, max_res=max_res, temperature=temperature, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=ref_feat_shape, device=device, dtype=dtype, ) num_spatial_dim = 1 # this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks for x in feat_shape: num_spatial_dim *= x sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) return sin_emb, cos_emb class RotaryEmbedding(nn.Module): """ Rotary position embedding NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not been well tested, and will likely change. It will be moved to its own file. The following impl/resources were referenced for this impl: * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py * https://blog.eleuther.ai/rotary-embeddings/ """ def __init__( self, dim, max_res=224, temperature=10000, in_pixels=True, linear_bands: bool = False, feat_shape: Optional[List[int]] = None, ref_feat_shape: Optional[List[int]] = None, ): super().__init__() self.dim = dim self.max_res = max_res self.temperature = temperature self.in_pixels = in_pixels self.feat_shape = feat_shape self.ref_feat_shape = ref_feat_shape if feat_shape is None: # only cache bands if in_pixels: bands = pixel_freq_bands( dim // 4, float(max_res), linear_bands=linear_bands, ) else: bands = freq_bands( dim // 4, temperature=temperature, step=1, ) self.register_buffer( 'bands', bands, persistent=False, ) self.pos_embed_sin = None self.pos_embed_cos = None else: # cache full sin/cos embeddings if shape provided up front emb_sin, emb_cos = build_rotary_pos_embed( feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape, ) self.bands = None self.register_buffer( 'pos_embed_sin', emb_sin, persistent=False, ) self.register_buffer( 'pos_embed_cos', emb_cos, persistent=False, ) def get_embed(self, shape: Optional[List[int]] = None): if self.bands is not None: # rebuild embeddings every call, use if target shape changes assert shape is not None return build_rotary_pos_embed( shape, self.bands, in_pixels=self.in_pixels, ) else: return self.pos_embed_sin, self.pos_embed_cos def forward(self, x): # assuming channel-first tensor where spatial dim are >= 2 sin_emb, cos_emb = self.get_embed(x.shape[2:]) return apply_rot_embed(x, sin_emb, cos_emb) class RotaryEmbeddingCat(nn.Module): """ Rotary position embedding w/ concatenatd sin & cos The following impl/resources were referenced for this impl: * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py * https://blog.eleuther.ai/rotary-embeddings/ """ def __init__( self, dim, max_res=224, temperature=10000, in_pixels=True, linear_bands: bool = False, feat_shape: Optional[List[int]] = None, ref_feat_shape: Optional[List[int]] = None, ): super().__init__() self.dim = dim self.max_res = max_res self.temperature = temperature self.in_pixels = in_pixels self.feat_shape = feat_shape self.ref_feat_shape = ref_feat_shape if feat_shape is None: # only cache bands if in_pixels: bands = pixel_freq_bands( dim // 4, float(max_res), linear_bands=linear_bands, ) else: bands = freq_bands( dim // 4, temperature=temperature, step=1, ) self.register_buffer( 'bands', bands, persistent=False, ) self.pos_embed = None else: # cache full 
sin/cos embeddings if shape provided up front embeds = build_rotary_pos_embed( feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape, ) self.bands = None self.register_buffer( 'pos_embed', torch.cat(embeds, -1), persistent=False, ) def get_embed(self, shape: Optional[List[int]] = None): if self.bands is not None and shape is not None: # rebuild embeddings every call, use if target shape changes embeds = build_rotary_pos_embed( shape, self.bands, in_pixels=self.in_pixels, ref_feat_shape=self.ref_feat_shape, ) return torch.cat(embeds, -1) elif self.pos_embed is not None: return self.pos_embed else: assert False, "get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands" def forward(self, x): # assuming channel-first tensor where spatial dim are >= 2 pos_embed = self.get_embed(x.shape[2:]) return apply_rot_embed_cat(x, pos_embed)
pytorch-image-models/timm/layers/pos_embed_sincos.py/0
{ "file_path": "pytorch-image-models/timm/layers/pos_embed_sincos.py", "repo_id": "pytorch-image-models", "token_count": 7180 }
import torch
import torch.nn as nn
import torch.nn.functional as F

from .cross_entropy import LabelSmoothingCrossEntropy


class JsdCrossEntropy(nn.Module):
    """ Jensen-Shannon Divergence + Cross-Entropy Loss

    Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' -
    https://arxiv.org/abs/1912.02781

    Hacked together by / Copyright 2020 Ross Wightman
    """
    def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
        super().__init__()
        self.num_splits = num_splits
        self.alpha = alpha
        if smoothing is not None and smoothing > 0:
            self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)
        else:
            self.cross_entropy_loss = torch.nn.CrossEntropyLoss()

    def __call__(self, output, target):
        split_size = output.shape[0] // self.num_splits
        assert split_size * self.num_splits == output.shape[0]
        logits_split = torch.split(output, split_size)

        # Cross-entropy is only computed on clean images
        loss = self.cross_entropy_loss(logits_split[0], target[:split_size])
        probs = [F.softmax(logits, dim=1) for logits in logits_split]

        # Clamp mixture distribution to avoid exploding KL divergence
        logp_mixture = torch.clamp(torch.stack(probs).mean(dim=0), 1e-7, 1).log()
        loss += self.alpha * sum([F.kl_div(
            logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)
        return loss
pytorch-image-models/timm/loss/jsd.py/0
{ "file_path": "pytorch-image-models/timm/loss/jsd.py", "repo_id": "pytorch-image-models", "token_count": 639 }
""" Deep Layer Aggregation and DLA w/ Res2Net DLA original adapted from Official Pytorch impl at: https://github.com/ucbdrive/dla DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 Res2Net additions from: https://github.com/gasvn/Res2Net/ Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 """ import math from typing import List, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DLA'] class DlaBasic(nn.Module): """DLA Basic""" def __init__(self, inplanes, planes, stride=1, dilation=1, **_): super(DlaBasic, self).__init__() self.conv1 = nn.Conv2d( inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.stride = stride def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += shortcut out = self.relu(out) return out class DlaBottleneck(nn.Module): """DLA/DLA-X Bottleneck""" expansion = 2 def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): super(DlaBottleneck, self).__init__() self.stride = stride mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) mid_planes = mid_planes // self.expansion self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes) self.conv2 = nn.Conv2d( mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality) self.bn2 = nn.BatchNorm2d(mid_planes) self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes) self.relu = nn.ReLU(inplace=True) def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += shortcut out = self.relu(out) return out class DlaBottle2neck(nn.Module): """ Res2Net/Res2NeXT DLA Bottleneck Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py """ expansion = 2 def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): super(DlaBottle2neck, self).__init__() self.is_first = stride > 1 self.scale = scale mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) mid_planes = mid_planes // self.expansion self.width = mid_planes self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes * scale) num_scale_convs = max(1, scale - 1) convs = [] bns = [] for _ in range(num_scale_convs): convs.append(nn.Conv2d( mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False)) bns.append(nn.BatchNorm2d(mid_planes)) self.convs = nn.ModuleList(convs) 
self.bns = nn.ModuleList(bns) self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes) self.relu = nn.ReLU(inplace=True) def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] # redundant, for torchscript for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: # self.is_first == True, None check for torchscript spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) out += shortcut out = self.relu(out) return out class DlaRoot(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, shortcut): super(DlaRoot, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) self.shortcut = shortcut def forward(self, x_children: List[torch.Tensor]): x = self.conv(torch.cat(x_children, 1)) x = self.bn(x) if self.shortcut: x += x_children[0] x = self.relu(x) return x class DlaTree(nn.Module): def __init__( self, levels, block, in_channels, out_channels, stride=1, dilation=1, cardinality=1, base_width=64, level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False, ): super(DlaTree, self).__init__() if root_dim == 0: root_dim = 2 * out_channels if level_root: root_dim += in_channels self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() self.project = nn.Identity() cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) if levels == 1: self.tree1 = block(in_channels, out_channels, stride, **cargs) self.tree2 = block(out_channels, out_channels, 1, **cargs) if in_channels != out_channels: # NOTE the official impl/weights have project layers in levels > 1 case that are never # used, I've moved the project layer here to avoid wasted params but old checkpoints will # need strict=False while loading. 
self.project = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(out_channels)) self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) else: cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) self.tree1 = DlaTree( levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs, ) self.tree2 = DlaTree( levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs, ) self.root = None self.level_root = level_root self.root_dim = root_dim self.levels = levels def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if children is None: children = [] bottom = self.downsample(x) shortcut = self.project(bottom) if self.level_root: children.append(bottom) x1 = self.tree1(x, shortcut) if self.root is not None: # levels == 1 x2 = self.tree2(x1) x = self.root([x2, x1] + children) else: children.append(x1) x = self.tree2(x1, None, children) return x class DLA(nn.Module): def __init__( self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, global_pool='avg', cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, drop_rate=0.0, ): super(DLA, self).__init__() self.channels = channels self.num_classes = num_classes self.cardinality = cardinality self.base_width = base_width assert output_stride == 32 # FIXME support dilation self.base_layer = nn.Sequential( nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), nn.BatchNorm2d(channels[0]), nn.ReLU(inplace=True), ) self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) self.feature_info = [ dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level dict(num_chs=channels[1], reduction=2, module='level1'), dict(num_chs=channels[2], reduction=4, module='level2'), dict(num_chs=channels[3], reduction=8, module='level3'), dict(num_chs=channels[4], reduction=16, module='level4'), dict(num_chs=channels[5], reduction=32, module='level5'), ] self.num_features = self.head_hidden_size = channels[-1] self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True, drop_rate=drop_rate, ) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): modules = [] for i in range(convs): modules.extend([ nn.Conv2d( inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)]) inplanes = planes return nn.Sequential(*modules) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^base_layer', blocks=r'^level(\d+)' if coarse else [ # an unusual arch, this achieves somewhat more granularity without getting super messy (r'^level(\d+)\.tree(\d+)', None), (r'^level(\d+)\.root', (2,)), (r'^level(\d+)', (1,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() def forward_features(self, x): x = self.base_layer(x) x = self.level0(x) x = self.level1(x) x = self.level2(x) x = self.level3(x) x = self.level4(x) x = self.level5(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) if pre_logits: return self.flatten(x) x = self.fc(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dla(variant, pretrained=False, **kwargs): return build_model_with_cfg( DLA, variant, pretrained, pretrained_strict=False, feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'base_layer.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'dla34.in1k': _cfg(hf_hub_id='timm/'), 'dla46_c.in1k': _cfg(hf_hub_id='timm/'), 'dla46x_c.in1k': _cfg(hf_hub_id='timm/'), 'dla60x_c.in1k': _cfg(hf_hub_id='timm/'), 'dla60.in1k': _cfg(hf_hub_id='timm/'), 'dla60x.in1k': _cfg(hf_hub_id='timm/'), 'dla102.in1k': _cfg(hf_hub_id='timm/'), 'dla102x.in1k': _cfg(hf_hub_id='timm/'), 'dla102x2.in1k': _cfg(hf_hub_id='timm/'), 'dla169.in1k': _cfg(hf_hub_id='timm/'), 'dla60_res2net.in1k': _cfg(hf_hub_id='timm/'), 'dla60_res2next.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def dla60_res2net(pretrained=False, **kwargs) -> DLA: model_args = dict( levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=1, base_width=28) return _create_dla('dla60_res2net', pretrained, **dict(model_args, **kwargs)) @register_model def dla60_res2next(pretrained=False,**kwargs): model_args = dict( levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=8, base_width=4) return _create_dla('dla60_res2next', pretrained, **dict(model_args, **kwargs)) @register_model def dla34(pretrained=False, **kwargs) -> DLA: # DLA-34 model_args = dict( levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic) return _create_dla('dla34', pretrained, **dict(model_args, **kwargs)) @register_model def 
dla46_c(pretrained=False, **kwargs) -> DLA: # DLA-46-C model_args = dict( levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck) return _create_dla('dla46_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla46x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-46-C model_args = dict( levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla46x_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla60x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-60-C model_args = dict( levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla60x_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla60(pretrained=False, **kwargs) -> DLA: # DLA-60 model_args = dict( levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck) return _create_dla('dla60', pretrained, **dict(model_args, **kwargs)) @register_model def dla60x(pretrained=False, **kwargs) -> DLA: # DLA-X-60 model_args = dict( levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla60x', pretrained, **dict(model_args, **kwargs)) @register_model def dla102(pretrained=False, **kwargs) -> DLA: # DLA-102 model_args = dict( levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, shortcut_root=True) return _create_dla('dla102', pretrained, **dict(model_args, **kwargs)) @register_model def dla102x(pretrained=False, **kwargs) -> DLA: # DLA-X-102 model_args = dict( levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True) return _create_dla('dla102x', pretrained, **dict(model_args, **kwargs)) @register_model def dla102x2(pretrained=False, **kwargs) -> DLA: # DLA-X-102 64 model_args = dict( levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True) return _create_dla('dla102x2', pretrained, **dict(model_args, **kwargs)) @register_model def dla169(pretrained=False, **kwargs) -> DLA: # DLA-169 model_args = dict( levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, shortcut_root=True) return _create_dla('dla169', pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/dla.py/0
{ "file_path": "pytorch-image-models/timm/models/dla.py", "repo_id": "pytorch-image-models", "token_count": 9163 }
from functools import partial import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._builder import pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels from ._registry import register_model, generate_default_cfgs from .mobilenetv3 import MobileNetV3, MobileNetV3Features __all__ = [] # model_registry will add each entrypoint fn to this def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): """Creates a hardcorenas model Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS Paper: https://arxiv.org/abs/2102.11646 """ num_features = 1280 se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=32, norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=se_layer, **kwargs, ) features_only = False model_cls = MobileNetV3 kwargs_filter = None if model_kwargs.pop('features_only', False): features_only = True kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool') model_cls = MobileNetV3Features model = build_model_with_cfg( model_cls, variant, pretrained, pretrained_strict=not features_only, kwargs_filter=kwargs_filter, **model_kwargs, ) if features_only: model.default_cfg = pretrained_cfg_for_features(model.default_cfg) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_A """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_B """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, 
variant='hardcorenas_b', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_C """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_D """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_E """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_F """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) return model
pytorch-image-models/timm/models/hardcorenas.py/0
{ "file_path": "pytorch-image-models/timm/models/hardcorenas.py", "repo_id": "pytorch-image-models", "token_count": 4629 }
""" MLP-Mixer, ResMLP, and gMLP in PyTorch This impl originally based on MLP-Mixer paper. Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 @article{tolstikhin2021, title={MLP-Mixer: An all-MLP Architecture for Vision}, author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, journal={arXiv preprint arXiv:2105.01601}, year={2021} } Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP Code: https://github.com/facebookresearch/deit Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 @misc{touvron2021resmlp, title={ResMLP: Feedforward networks for image classification with data-efficient training}, author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, year={2021}, eprint={2105.03404}, } Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 @misc{liu2021pay, title={Pay Attention to MLPs}, author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le}, year={2021}, eprint={2105.08050}, } A thank you to paper authors for releasing code and weights. Hacked together by / Copyright 2021 Ross Wightman """ import math from functools import partial from typing import List, Optional, Union, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MixerBlock', 'MlpMixer'] # model_registry will add each entrypoint fn to this class MixerBlock(nn.Module): """ Residual Block w/ token mixing and channel MLPs Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ def __init__( self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0., ): super().__init__() tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] self.norm1 = norm_layer(dim) self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x): return torch.addcmul(self.beta, self.alpha, x) class ResBlock(nn.Module): """ Residual MLP block w/ LayerScale and Affine 'norm' Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ def __init__( self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0., ): super().__init__() channel_dim = int(dim * mlp_ratio) self.norm1 = norm_layer(dim) self.linear_tokens = nn.Linear(seq_len, seq_len) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) self.ls1 = nn.Parameter(init_values * torch.ones(dim)) self.ls2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) return x class SpatialGatingUnit(nn.Module): """ Spatial Gating Unit Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): super().__init__() gate_dim = dim // 2 self.norm = norm_layer(gate_dim) self.proj = nn.Linear(seq_len, seq_len) def init_weights(self): # special init for the projection gate, called as override by base model init nn.init.normal_(self.proj.weight, std=1e-6) nn.init.ones_(self.proj.bias) def forward(self, x): u, v = x.chunk(2, dim=-1) v = self.norm(v) v = self.proj(v.transpose(-1, -2)) return u * v.transpose(-1, -2) class SpatialGatingBlock(nn.Module): """ Residual Block w/ Spatial Gating Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ def __init__( self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0., ): super().__init__() channel_dim = int(dim * mlp_ratio) self.norm = norm_layer(dim) sgu = partial(SpatialGatingUnit, seq_len=seq_len) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): x = x + self.drop_path(self.mlp_channels(self.norm(x))) return x class MlpMixer(nn.Module): def __init__( self, num_classes=1000, img_size=224, in_chans=3, patch_size=16, num_blocks=8, embed_dim=512, mlp_ratio=(0.5, 4.0), block_layer=MixerBlock, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_rate=0., proj_drop_rate=0., drop_path_rate=0., nlhb=False, stem_norm=False, global_pool='avg', ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.grad_checkpointing = False self.stem = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None, ) reduction = self.stem.feat_ratio() if hasattr(self.stem, 'feat_ratio') else patch_size # FIXME drop_path (stochastic depth scaling rule or all the same?) self.blocks = nn.Sequential(*[ block_layer( embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, drop=proj_drop_rate, drop_path=drop_path_rate, ) for _ in range(num_blocks)]) self.feature_info = [ dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(num_blocks)] self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.init_weights(nlhb=nlhb) @torch.jit.ignore def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0. named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence return_prefix_tokens: Return both prefix and spatial intermediate tokens norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
reshape = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass B, _, height, width = x.shape x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): x = blk(x) if i in take_indices: # normalize intermediates with final norm layer if enabled intermediates.append(self.norm(x) if norm else x) # process intermediates if reshape: # reshape to BCHW output format H, W = self.stem.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): """ Mixer weight initialization (trying to match Flax defaults) """ if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # like MLP init in vit (my original init) nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): # NOTE if a parent module contains init_weights method, it can override the init of the # child modules as this will be called in depth-first order. 
module.init_weights() def checkpoint_filter_fn(state_dict, model): """ Remap checkpoints if needed """ if 'patch_embed.proj.weight' in state_dict: # Remap FB ResMlp models -> timm out_dict = {} for k, v in state_dict.items(): k = k.replace('patch_embed.', 'stem.') k = k.replace('attn.', 'linear_tokens.') k = k.replace('mlp.', 'mlp_channels.') k = k.replace('gamma_', 'ls') if k.endswith('.alpha') or k.endswith('.beta'): v = v.reshape(1, 1, -1) out_dict[k] = v return out_dict return state_dict def _create_mixer(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg( MlpMixer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'mixer_s32_224.untrained': _cfg(), 'mixer_s16_224.untrained': _cfg(), 'mixer_b32_224.untrained': _cfg(), 'mixer_b16_224.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', ), 'mixer_b16_224.goog_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', num_classes=21843 ), 'mixer_l32_224.untrained': _cfg(), 'mixer_l16_224.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', ), 'mixer_l16_224.goog_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', num_classes=21843 ), # Mixer ImageNet-21K-P pretraining 'mixer_b16_224.miil_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221, ), 'mixer_b16_224.miil_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', ), 'gmixer_12_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmixer_24_224.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 
'resmlp_big_24_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_dino': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_dino': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmlp_ti16_224.untrained': _cfg(), 'gmlp_s16_224.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', ), 'gmlp_b16_224.untrained': _cfg(), }) @register_model def mixer_s32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-S/32 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_s16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-S/16 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-B/32 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-L/32 224x224. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-L/16 224x224. 
ImageNet-1k pretrained weights. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_12_224(pretrained=False, **kwargs) -> MlpMixer: """ Glu-Mixer-12 224x224 Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer """ model_args = dict( patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_24_224(pretrained=False, **kwargs) -> MlpMixer: """ Glu-Mixer-24 224x224 Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer """ model_args = dict( patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_12_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-12 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_24_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-24 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_36_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-36 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_big_24_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-B-24 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_ti16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Tiny Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_s16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Small Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) return 
model @register_model def gmlp_b16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Base Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) return model register_model_deprecations(__name__, { 'mixer_b16_224_in21k': 'mixer_b16_224.goog_in21k_ft_in1k', 'mixer_l16_224_in21k': 'mixer_l16_224.goog_in21k_ft_in1k', 'mixer_b16_224_miil': 'mixer_b16_224.miil_in21k_ft_in1k', 'mixer_b16_224_miil_in21k': 'mixer_b16_224.miil_in21k', 'resmlp_12_distilled_224': 'resmlp_12_224.fb_distilled_in1k', 'resmlp_24_distilled_224': 'resmlp_24_224.fb_distilled_in1k', 'resmlp_36_distilled_224': 'resmlp_36_224.fb_distilled_in1k', 'resmlp_big_24_distilled_224': 'resmlp_big_24_224.fb_distilled_in1k', 'resmlp_big_24_224_in22ft1k': 'resmlp_big_24_224.fb_in22k_ft_in1k', 'resmlp_12_224_dino': 'resmlp_12_224', 'resmlp_24_224_dino': 'resmlp_24_224', })
pytorch-image-models/timm/models/mlp_mixer.py/0
{ "file_path": "pytorch-image-models/timm/models/mlp_mixer.py", "repo_id": "pytorch-image-models", "token_count": 13020 }
""" Res2Net and Res2NeXt Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 """ import math import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet __all__ = [] class Bottle2neck(nn.Module): """ Res2Net/Res2NeXT Bottleneck Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py """ expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_, ): super(Bottle2neck, self).__init__() self.scale = scale self.is_first = stride > 1 or downsample is not None self.num_scales = max(1, scale - 1) width = int(math.floor(planes * (base_width / 64.0))) * cardinality self.width = width outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) self.bn1 = norm_layer(width * scale) convs = [] bns = [] for i in range(self.num_scales): convs.append(nn.Conv2d( width, width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)) bns.append(norm_layer(width)) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) if self.is_first: # FIXME this should probably have count_include_pad=False, but hurts original weights self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) else: self.pool = None self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = attn_layer(outplanes) if attn_layer is not None else None self.relu = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] # redundant, for torchscript for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: # self.is_first == True, None check for torchscript spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) if self.se is not None: out = self.se(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.relu(out) return out def _create_res2net(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 
'res2next50.in1k': _cfg(hf_hub_id='timm/'), 'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), 'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), }) @register_model def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 26w4s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4)) return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-101 26w4s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4)) return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 26w6s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6)) return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 26w8s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8)) return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 48w2s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2)) return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 14w8s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8)) return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs)) @register_model def res2next50(pretrained=False, **kwargs) -> ResNet: """Construct Res2NeXt-50 4s """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4)) return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50d(pretrained=False, **kwargs) -> ResNet: """Construct Res2Net-50 """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep', avg_down=True, stem_width=32, block_args=dict(scale=4)) return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs)) @register_model def res2net101d(pretrained=False, **kwargs) -> ResNet: """Construct Res2Net-50 """ model_args = dict( block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep', avg_down=True, stem_width=32, block_args=dict(scale=4)) return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/res2net.py/0
{ "file_path": "pytorch-image-models/timm/models/res2net.py", "repo_id": "pytorch-image-models", "token_count": 3659 }
"""VGG Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for timm functionality. Copyright 2021 Ross Wightman """ from typing import Any, Dict, List, Optional, Type, Union, cast import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['VGG'] cfgs: Dict[str, List[Union[str, int]]] = { 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class ConvMlp(nn.Module): def __init__( self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, drop_rate: float = 0.2, act_layer: Type[nn.Module] = nn.ReLU, conv_layer: Type[nn.Module] = nn.Conv2d, ): super(ConvMlp, self).__init__() self.input_kernel_size = kernel_size mid_features = int(out_features * mlp_ratio) self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) self.act1 = act_layer(True) self.drop = nn.Dropout(drop_rate) self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) self.act2 = act_layer(True) def forward(self, x): if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: # keep the input size >= 7x7 output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) x = F.adaptive_avg_pool2d(x, output_size) x = self.fc1(x) x = self.act1(x) x = self.drop(x) x = self.fc2(x) x = self.act2(x) return x class VGG(nn.Module): def __init__( self, cfg: List[Any], num_classes: int = 1000, in_chans: int = 3, output_stride: int = 32, mlp_ratio: float = 1.0, act_layer: Type[nn.Module] = nn.ReLU, conv_layer: Type[nn.Module] = nn.Conv2d, norm_layer: Optional[Type[nn.Module]] = None, global_pool: str = 'avg', drop_rate: float = 0., ) -> None: super(VGG, self).__init__() assert output_stride == 32 self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.use_norm = norm_layer is not None self.feature_info = [] prev_chs = in_chans net_stride = 1 pool_layer = nn.MaxPool2d layers: List[nn.Module] = [] for v in cfg: last_idx = len(layers) - 1 if v == 'M': self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) layers += [pool_layer(kernel_size=2, stride=2)] net_stride *= 2 else: v = cast(int, v) conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) if norm_layer is not None: layers += [conv2d, norm_layer(v), act_layer(inplace=True)] else: layers += [conv2d, act_layer(inplace=True)] prev_chs = v self.features = nn.Sequential(*layers) self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) self.num_features = prev_chs self.head_hidden_size = 4096 self.pre_logits = ConvMlp( prev_chs, self.head_hidden_size, 7, mlp_ratio=mlp_ratio, drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer, ) self.head = ClassifierHead( self.head_hidden_size, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) 
self._initialize_weights() @torch.jit.ignore def group_matcher(self, coarse=False): # this treats BN layers as separate groups for bn variants, a lot of effort to fix that return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False): x = self.pre_logits(x) return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _initialize_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def _filter_fn(state_dict): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): k_r = k k_r = k_r.replace('classifier.0', 'pre_logits.fc1') k_r = k_r.replace('classifier.3', 'pre_logits.fc2') k_r = k_r.replace('classifier.6', 'head.fc') if 'classifier.0.weight' in k: v = v.reshape(-1, 512, 7, 7) if 'classifier.3.weight' in k: v = v.reshape(-1, 4096, 1, 1) out_dict[k_r] = v return out_dict def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: cfg = variant.split('_')[0] # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5] out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5)) model = build_model_with_cfg( VGG, variant, pretrained, model_cfg=cfgs[cfg], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_filter_fn=_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 11-layer model (configuration "A") from `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg11', pretrained=pretrained, **model_args) @register_model def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 11-layer model (configuration "A") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ 
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) @register_model def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 13-layer model (configuration "B") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg13', pretrained=pretrained, **model_args) @register_model def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 13-layer model (configuration "B") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) @register_model def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg16', pretrained=pretrained, **model_args) @register_model def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 16-layer model (configuration "D") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) @register_model def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration "E") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg19', pretrained=pretrained, **model_args) @register_model def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration 'E') with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args)
pytorch-image-models/timm/models/vgg.py/0
{ "file_path": "pytorch-image-models/timm/models/vgg.py", "repo_id": "pytorch-image-models", "token_count": 5197 }
import math import torch from torch.optim.optimizer import Optimizer class AdaBelief(Optimizer): r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-16) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) decoupled_decay (boolean, optional): (default: True) If set as True, then the optimizer uses decoupled weight decay as in AdamW fixed_decay (boolean, optional): (default: False) This is used when weight_decouple is set as True. When fixed_decay == True, the weight decay is performed as $W_{new} = W_{old} - W_{old} \times decay$. When fixed_decay == False, the weight decay is performed as $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the weight decay ratio decreases with learning rate (lr). rectify (boolean, optional): (default: True) If set as True, then perform the rectified update similar to RAdam degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update when variance of gradient is high reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer' For example train/args for EfficientNet see these gists - link to train_script: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037 - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3 """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True, ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): for param in params: if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): param['buffer'] = [[None, None, None] for _ in range(10)] defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)] ) super(AdaBelief, self).__init__(params, defaults) def __setstate__(self, state): super(AdaBelief, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def reset(self): for group in self.param_groups: for p in group['params']: state = self.state[p] amsgrad = group['amsgrad'] # State initialization state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average 
of squared gradient values state['exp_avg_var'] = torch.zeros_like(p) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_var'] = torch.zeros_like(p) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') p_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_fp32 = p_fp32.float() amsgrad = group['amsgrad'] beta1, beta2 = group['betas'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p_fp32) # Exponential moving average of squared gradient values state['exp_avg_var'] = torch.zeros_like(p_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_var'] = torch.zeros_like(p_fp32) # perform weight decay, check if decoupled weight decay if group['decoupled_decay']: if not group['fixed_decay']: p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) else: p_fp32.mul_(1.0 - group['weight_decay']) else: if group['weight_decay'] != 0: grad.add_(p_fp32, alpha=group['weight_decay']) # get current state variable exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] # Update first and second moment running average exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) grad_residual = grad - exp_avg exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) if amsgrad: max_exp_avg_var = state['max_exp_avg_var'] # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) # Use the max. for normalizing running avg. 
of gradient denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) else: denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) # update if not group['rectify']: # Default update step_size = group['lr'] / bias_correction1 p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: # Rectified update, forked from RAdam buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: num_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma # more conservative since it's an approximated value if num_sma >= 5: step_size = math.sqrt( (1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) elif group['degenerated_to_sgd']: step_size = 1.0 / (1 - beta1 ** state['step']) else: step_size = -1 buffered[2] = step_size if num_sma >= 5: denom = exp_avg_var.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) elif step_size > 0: p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_fp32) return loss
pytorch-image-models/timm/optim/adabelief.py/0
{ "file_path": "pytorch-image-models/timm/optim/adabelief.py", "repo_id": "pytorch-image-models", "token_count": 5278 }
import math import torch from torch.optim.optimizer import Optimizer class NAdamLegacy(Optimizer): """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). NOTE: This impl has been deprecated in favour of torch.optim.NAdam and remains as a reference It has been proposed in `Incorporating Nesterov Momentum into Adam`__. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) schedule_decay (float, optional): momentum schedule decay (default: 4e-3) __ http://cs229.stanford.edu/proj2015/054_report.pdf __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf Originally taken from: https://github.com/pytorch/pytorch/pull/1408 NOTE: Has potential issues but does work well on some problems. """ def __init__( self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, schedule_decay=4e-3, ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay, ) super(NAdamLegacy, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['m_schedule'] = 1. state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) # Warming momentum schedule m_schedule = state['m_schedule'] schedule_decay = group['schedule_decay'] exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] eps = group['eps'] state['step'] += 1 t = state['step'] bias_correction2 = 1 - beta2 ** t if group['weight_decay'] != 0: grad = grad.add(p, alpha=group['weight_decay']) momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) m_schedule_new = m_schedule * momentum_cache_t m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 state['m_schedule'] = m_schedule_new # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) return loss
pytorch-image-models/timm/optim/nadam.py/0
{ "file_path": "pytorch-image-models/timm/optim/nadam.py", "repo_id": "pytorch-image-models", "token_count": 2021 }
""" Step Scheduler Basic step LR schedule with warmup, noise. Hacked together by / Copyright 2020 Ross Wightman """ import math import torch from typing import List from .scheduler import Scheduler class StepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: float, decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] return lrs
pytorch-image-models/timm/scheduler/step_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/step_lr.py", "repo_id": "pytorch-image-models", "token_count": 951 }
from typing import Optional, Tuple, List import torch def onnx_forward(onnx_file, example_input): import onnxruntime sess_options = onnxruntime.SessionOptions() session = onnxruntime.InferenceSession(onnx_file, sess_options) input_name = session.get_inputs()[0].name output = session.run([], {input_name: example_input.numpy()}) output = output[0] return output def onnx_export( model: torch.nn.Module, output_file: str, example_input: Optional[torch.Tensor] = None, training: bool = False, verbose: bool = False, check: bool = True, check_forward: bool = False, batch_size: int = 64, input_size: Tuple[int, int, int] = None, opset: Optional[int] = None, dynamic_size: bool = False, aten_fallback: bool = False, keep_initializers: Optional[bool] = None, use_dynamo: bool = False, input_names: List[str] = None, output_names: List[str] = None, ): import onnx if training: training_mode = torch.onnx.TrainingMode.TRAINING model.train() else: training_mode = torch.onnx.TrainingMode.EVAL model.eval() if example_input is None: if not input_size: assert hasattr(model, 'default_cfg') input_size = model.default_cfg.get('input_size') example_input = torch.randn((batch_size,) + input_size, requires_grad=training) # Run model once before export trace, sets padding for models with Conv2dSameExport. This means # that the padding for models with Conv2dSameExport (most models with tf_ prefix) is fixed for # the input img_size specified in this script. # Opset >= 11 should allow for dynamic padding, however I cannot get it to work due to # issues in the tracing of the dynamic padding or errors attempting to export the model after jit # scripting it (an approach that should work). Perhaps in a future PyTorch or ONNX versions... with torch.no_grad(): original_out = model(example_input) input_names = input_names or ["input0"] output_names = output_names or ["output0"] dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}} if dynamic_size: dynamic_axes['input0'][2] = 'height' dynamic_axes['input0'][3] = 'width' if aten_fallback: export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK else: export_type = torch.onnx.OperatorExportTypes.ONNX if use_dynamo: export_options = torch.onnx.ExportOptions(dynamic_shapes=dynamic_size) export_output = torch.onnx.dynamo_export( model, example_input, export_options=export_options, ) export_output.save(output_file) torch_out = None else: torch_out = torch.onnx._export( model, example_input, output_file, training=training_mode, export_params=True, verbose=verbose, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=keep_initializers, dynamic_axes=dynamic_axes, opset_version=opset, operator_export_type=export_type ) if check: onnx_model = onnx.load(output_file) onnx.checker.check_model(onnx_model, full_check=True) # assuming throw on error if check_forward and not training: import numpy as np onnx_out = onnx_forward(output_file, example_input) if torch_out is not None: np.testing.assert_almost_equal(torch_out.numpy(), onnx_out, decimal=3) np.testing.assert_almost_equal(original_out.numpy(), torch_out.numpy(), decimal=5) else: np.testing.assert_almost_equal(original_out.numpy(), onnx_out, decimal=3)
pytorch-image-models/timm/utils/onnx.py/0
{ "file_path": "pytorch-image-models/timm/utils/onnx.py", "repo_id": "pytorch-image-models", "token_count": 1722 }
.PHONY: quality style test docs utils check_dirs := examples src tests utils # Check code quality of the source code quality: ruff check $(check_dirs) ruff format --check $(check_dirs) python utils/check_tests_in_ci.py # Format source code automatically style: ruff check $(check_dirs) --fix ruff format $(check_dirs) # Run smolagents tests test: pytest ./tests/
smolagents/Makefile/0
{ "file_path": "smolagents/Makefile", "repo_id": "smolagents", "token_count": 123 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Building good agents [[open-in-colab]] There's a world of difference between building an agent that works and one that doesn't. How can we build agents that fall into the former category? In this guide, we're going to talk about best practices for building agents. > [!TIP] > If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour). ### The best agentic systems are the simplest: simplify the workflow as much as you can Giving an LLM some agency in your workflow introduces some risk of errors. Well-programmed agentic systems have good error logging and retry mechanisms anyway, so the LLM engine has a chance to self-correct their mistake. But to reduce the risk of LLM error to the maximum, you should simplify your workflow! Let's revisit the example from the [intro to agents](../conceptual_guides/intro_agents): a bot that answers user queries for a surf trip company. Instead of letting the agent do 2 different calls for "travel distance API" and "weather API" each time they are asked about a new surf spot, you could just make one unified tool "return_spot_information", a function that calls both APIs at once and returns their concatenated outputs to the user. This will reduce costs, latency, and error risk! The main guideline is: Reduce the number of LLM calls as much as you can. This leads to a few takeaways: - Whenever possible, group 2 tools in one, like in our example of the two APIs. - Whenever possible, logic should be based on deterministic functions rather than agentic decisions. ### Improve the information flow to the LLM engine Remember that your LLM engine is like an *intelligent* robot, tapped into a room with the only communication with the outside world being notes passed under a door. It won't know of anything that happened if you don't explicitly put that into its prompt. So first start with making your task very clear! Since an agent is powered by an LLM, minor variations in your task formulation might yield completely different results. Then, improve the information flow towards your agent in tool use. Particular guidelines to follow: - Each tool should log (by simply using `print` statements inside the tool's `forward` method) everything that could be useful for the LLM engine. - In particular, logging detail on tool execution errors would help a lot! 
For instance, here's a tool that retrieves weather data based on location and date-time:

First, here's a poor version:
```python
import datetime
from smolagents import tool

def get_weather_report_at_coordinates(coordinates, date_time):
    # Dummy function, returns a list of [temperature in °C, risk of rain on a scale 0-1, wave height in m]
    return [28.0, 0.35, 0.85]

def convert_location_to_coordinates(location):
    # Returns dummy coordinates
    return [3.3, -42.0]

@tool
def get_weather_api(location: str, date_time: str) -> str:
    """
    Returns the weather report.

    Args:
        location: the name of the place that you want the weather for.
        date_time: the date and time for which you want the report.
    """
    lon, lat = convert_location_to_coordinates(location)
    date_time = datetime.datetime.strptime(date_time, "%m/%d/%y %H:%M:%S")
    return str(get_weather_report_at_coordinates((lon, lat), date_time))
```

Why is it bad?
- the format that should be used for `date_time` is never specified
- there's no detail on how the location should be specified
- there's no logging mechanism to surface explicit failure cases, such as the location or `date_time` not being properly formatted
- the output format is hard to understand

If the tool call fails, the error trace logged in memory can help the LLM reverse engineer the tool to fix the errors. But why leave it with so much heavy lifting to do?

A better way to build this tool would have been the following:
```python
@tool
def get_weather_api(location: str, date_time: str) -> str:
    """
    Returns the weather report.

    Args:
        location: the name of the place that you want the weather for. Should be a place name, followed by possibly a city name, then a country, like "Anchor Point, Taghazout, Morocco".
        date_time: the date and time for which you want the report, formatted as '%m/%d/%y %H:%M:%S'.
    """
    lon, lat = convert_location_to_coordinates(location)
    try:
        date_time = datetime.datetime.strptime(date_time, "%m/%d/%y %H:%M:%S")
    except Exception as e:
        raise ValueError("Conversion of `date_time` to datetime format failed, make sure to provide a string in format '%m/%d/%y %H:%M:%S'. Full trace: " + str(e))
    temperature_celsius, risk_of_rain, wave_height = get_weather_report_at_coordinates((lon, lat), date_time)
    return f"Weather report for {location}, {date_time}: Temperature will be {temperature_celsius}°C, risk of rain is {risk_of_rain*100:.0f}%, wave height is {wave_height}m."
```

In general, to ease the load on your LLM, a good question to ask yourself is: "How easy would it be for me, if I were dumb and using this tool for the first time ever, to program with this tool and correct my own errors?".

### Give more arguments to the agent

To pass some additional objects to your agent beyond the simple string describing the task, you can use the `additional_args` argument to pass any type of object:

```py
from smolagents import CodeAgent, HfApiModel

model_id = "meta-llama/Llama-3.3-70B-Instruct"

agent = CodeAgent(tools=[], model=HfApiModel(model_id=model_id), add_base_tools=True)

agent.run(
    "Why does Mike not know many people in New York?",
    additional_args={"mp3_sound_file_url": 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3'}
)
```
For instance, you can use this `additional_args` argument to pass images or strings that you want your agent to leverage.

## How to debug your agent

### 1. Use a stronger LLM

In agentic workflows, some errors are actual errors, while others are the fault of your LLM engine not reasoning properly.
For instance, consider this trace for an `CodeAgent` that I asked to create a car picture: ``` ==================================================================================================== New task ==================================================================================================== Make me a cool car picture ──────────────────────────────────────────────────────────────────────────────────────────────────── New step ──────────────────────────────────────────────────────────────────────────────────────────────────── Agent is executing the code below: ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── image_generator(prompt="A cool, futuristic sports car with LED headlights, aerodynamic design, and vibrant color, high-res, photorealistic") ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── Last output from code snippet: ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── /var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png Step 1: - Time taken: 16.35 seconds - Input tokens: 1,383 - Output tokens: 77 ──────────────────────────────────────────────────────────────────────────────────────────────────── New step ──────────────────────────────────────────────────────────────────────────────────────────────────── Agent is executing the code below: ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── final_answer("/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png") ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── Print outputs: Last output from code snippet: ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── /var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png Final answer: /var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png ``` The user sees, instead of an image being returned, a path being returned to them. It could look like a bug from the system, but actually the agentic system didn't cause the error: it's just that the LLM brain did the mistake of not saving the image output into a variable. Thus it cannot access the image again except by leveraging the path that was logged while saving the image, so it returns the path instead of an image. The first step to debugging your agent is thus "Use a more powerful LLM". Alternatives like `Qwen2/5-72B-Instruct` wouldn't have made that mistake. ### 2. Provide more guidance / more information You can also use less powerful models, provided you guide them more effectively. 
Put yourself in the shoes of your model: if you were the model solving the task, would you struggle with the information available to you (from the system prompt + task formulation + tool description) ? Would you need some added clarifications? To provide extra information, we do not recommend to change the system prompt right away: the default system prompt has many adjustments that you do not want to mess up except if you understand the prompt very well. Better ways to guide your LLM engine are: - If it's about the task to solve: add all these details to the task. The task could be 100s of pages long. - If it's about how to use tools: the description attribute of your tools. ### 3. Change the system prompt (generally not advised) If above clarifications are not sufficient, you can change the system prompt. Let's see how it works. For example, let us check the default system prompt for the [`CodeAgent`] (below version is shortened by skipping zero-shot examples). ```python print(agent.prompt_templates["system_prompt"]) ``` Here is what you get: ```text You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can. To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code. To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences. At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use. Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence. During each intermediate step, you can use 'print()' to save whatever important information you will then need. These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step. In the end you have to return a final answer using the `final_answer` tool. Here are a few examples using notional tools: --- {examples} Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools: {{tool_descriptions}} {{managed_agents_descriptions}} Here are the rules you should always follow to solve your task: 1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail. 2. Use only variables that you have defined! 3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'. 4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block. 5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters. 6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'. 7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables. 8. 
You can use imports in your code, but only from the following list of modules: {{authorized_imports}} 9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist. 10. Don't give up! You're in charge of solving the task, not providing directions to solve it. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. ``` As you can see, there are placeholders like `"{{tool_descriptions}}"`: these will be used upon agent initialization to insert certain automatically generated descriptions of tools or managed agents. So while you can overwrite this system prompt template by passing your custom prompt as an argument to the `system_prompt` parameter, your new system prompt must contain the following placeholders: - `"{{tool_descriptions}}"` to insert tool descriptions. - `"{{managed_agents_description}}"` to insert the description for managed agents if there are any. - For `CodeAgent` only: `"{{authorized_imports}}"` to insert the list of authorized imports. Then you can change the system prompt as follows: ```py from smolagents.prompts import CODE_SYSTEM_PROMPT modified_system_prompt = CODE_SYSTEM_PROMPT + "\nHere you go!" # Change the system prompt here agent = CodeAgent( tools=[], model=HfApiModel(), system_prompt=modified_system_prompt ) ``` This also works with the [`ToolCallingAgent`]. ### 4. Extra planning We provide a model for a supplementary planning step, that an agent can run regularly in-between normal action steps. In this step, there is no tool call, the LLM is simply asked to update a list of facts it knows and to reflect on what steps it should take next based on those facts. ```py from smolagents import load_tool, CodeAgent, HfApiModel, DuckDuckGoSearchTool from dotenv import load_dotenv load_dotenv() # Import tool from Hub image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True) search_tool = DuckDuckGoSearchTool() agent = CodeAgent( tools=[search_tool, image_generation_tool], model=HfApiModel("Qwen/Qwen2.5-72B-Instruct"), planning_interval=3 # This is where you activate planning! ) # Run it! result = agent.run( "How long would a cheetah at full speed take to run the length of Pont Alexandre III?", ) ```
smolagents/docs/source/en/tutorials/building_good_agents.md/0
{ "file_path": "smolagents/docs/source/en/tutorials/building_good_agents.md", "repo_id": "smolagents", "token_count": 4250 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # OpenTelemetry के साथ runs का निरीक्षण [[open-in-colab]] > [!TIP] > यदि आप एजेंट्स बनाने में नए हैं, तो पहले [एजेंट्स का परिचय](../conceptual_guides/intro_agents) और [smolagents की गाइडेड टूर](../guided_tour) पढ़ना सुनिश्चित करें। ### Agents runs को लॉग क्यों करें? Agent runs को डीबग करना जटिल होता है। यह सत्यापित करना कठिन है कि एक रन ठीक से चला या नहीं, क्योंकि एजेंट वर्कफ़्लो [डिज़ाइन के अनुसार अप्रत्याशित](../conceptual_guides/intro_agents) होते हैं (यदि वे प्रत्याशित होते, तो आप पुराने अच्छे कोड का ही उपयोग कर रहे होते)। और रन का निरीक्षण करना भी कठिन है: मल्टी-स्टेप एजेंट्स जल्दी ही कंसोल को लॉग से भर देते हैं, और अधिकांश त्रुटियां केवल "LLM dumb" प्रकार की त्रुटियां होती हैं, जिनसे LLM अगले चरण में बेहतर कोड या टूल कॉल लिखकर स्वयं को सुधार लेता है। इसलिए बाद के निरीक्षण और मॉनिटरिंग के लिए प्रोडक्शन में agent runs को रिकॉर्ड करने के लिए इंस्ट्रुमेंटेशन का उपयोग करना आवश्यक है! हमने agent runs को इंस्ट्रुमेंट करने के लिए [OpenTelemetry](https://opentelemetry.io/) मानक को अपनाया है। इसका मतलब है कि आप बस कुछ इंस्ट्रुमेंटेशन कोड चला सकते हैं, फिर अपने एजेंट्स को सामान्य रूप से चला सकते हैं, और सब कुछ आपके प्लेटफॉर्म में लॉग हो जाता है। यह इस प्रकार होता है: पहले आवश्यक पैकेज इंस्टॉल करें। यहां हम [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) इंस्टॉल करते हैं क्योंकि यह लॉग्स को एकत्र और निरीक्षण करने का एक अच्छा समाधान है, लेकिन इस संग्रह और निरीक्षण भाग के लिए आप अन्य OpenTelemetry-कम्पैटिबल प्लेटफॉर्म्स का उपयोग कर सकते हैं। ```shell pip install smolagents pip install arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents ``` फिर कलेक्टर को बैकग्राउंड में चलाएं। ```shell python -m phoenix.server.main serve ``` अंत में, अपने एजेंट्स को ट्रेस करने और ट्रेस को नीचे परिभाषित एंडपॉइंट पर Phoenix को भेजने के लिए `SmolagentsInstrumentor` को सेट करें। ```python from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor from openinference.instrumentation.smolagents import SmolagentsInstrumentor from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor endpoint = "http://0.0.0.0:6006/v1/traces" trace_provider = TracerProvider() trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) SmolagentsInstrumentor().instrument(tracer_provider=trace_provider) ``` तब आप अपने एजेंट चला सकते हैं! 
```py from smolagents import ( CodeAgent, ToolCallingAgent, ManagedAgent, DuckDuckGoSearchTool, VisitWebpageTool, HfApiModel, ) model = HfApiModel() agent = ToolCallingAgent( tools=[DuckDuckGoSearchTool(), VisitWebpageTool()], model=model, ) managed_agent = ManagedAgent( agent=agent, name="managed_agent", description="This is an agent that can do web search.", ) manager_agent = CodeAgent( tools=[], model=model, managed_agents=[managed_agent], ) manager_agent.run( "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?" ) ``` और फिर आप अपने रन का निरीक्षण करने के लिए `http://0.0.0.0:6006/projects/` पर जा सकते हैं! <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.png"> आप देख सकते हैं कि CodeAgent ने अपने मैनेज्ड ToolCallingAgent को (वैसे, मैनेज्ड एजेंट एक CodeAgent भी हो सकता था) U.S. 2024 ग्रोथ रेट के लिए वेब सर्च चलाने के लिए कॉल किया। फिर मैनेज्ड एजेंट ने अपनी रिपोर्ट लौटाई और मैनेजर एजेंट ने अर्थव्यवस्था के दोगुना होने का समय गणना करने के लिए उस पर कार्य किया! अच्छा है, है ना?
smolagents/docs/source/hi/tutorials/inspect_runs.md/0
{ "file_path": "smolagents/docs/source/hi/tutorials/inspect_runs.md", "repo_id": "smolagents", "token_count": 3463 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 工具 [[open-in-colab]] 在这里,我们将学习高级工具的使用。 > [!TIP] > 如果你是构建 agent 的新手,请确保先阅读 [agent 介绍](../conceptual_guides/intro_agents) 和 [smolagents 导览](../guided_tour)。 - [工具](#工具) - [什么是工具,如何构建一个工具?](#什么是工具如何构建一个工具) - [将你的工具分享到 Hub](#将你的工具分享到-hub) - [将 Space 导入为工具](#将-space-导入为工具) - [使用 LangChain 工具](#使用-langchain-工具) - [管理你的 agent 工具箱](#管理你的-agent-工具箱) - [使用工具集合](#使用工具集合) ### 什么是工具,如何构建一个工具? 工具主要是 LLM 可以在 agent 系统中使用的函数。 但要使用它,LLM 需要被提供一个 API:名称、工具描述、输入类型和描述、输出类型。 所以它不能仅仅是一个函数。它应该是一个类。 因此,核心上,工具是一个类,它包装了一个函数,并带有帮助 LLM 理解如何使用它的元数据。 以下是它的结构: ```python from smolagents import Tool class HFModelDownloadsTool(Tool): name = "model_download_counter" description = """ This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint.""" inputs = { "task": { "type": "string", "description": "the task category (such as text-classification, depth-estimation, etc)", } } output_type = "string" def forward(self, task: str): from huggingface_hub import list_models model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return model.id model_downloads_tool = HFModelDownloadsTool() ``` 自定义工具继承 [`Tool`] 以继承有用的方法。子类还定义了: - 一个属性 `name`,对应于工具本身的名称。名称通常描述工具的功能。由于代码返回任务中下载量最多的模型,我们将其命名为 `model_download_counter`。 - 一个属性 `description`,用于填充 agent 的系统提示。 - 一个 `inputs` 属性,它是一个带有键 `"type"` 和 `"description"` 的字典。它包含帮助 Python 解释器对输入做出明智选择的信息。 - 一个 `output_type` 属性,指定输出类型。`inputs` 和 `output_type` 的类型应为 [Pydantic 格式](https://docs.pydantic.dev/latest/concepts/json_schema/#generating-json-schema),它们可以是以下之一:[`~AUTHORIZED_TYPES`]。 - 一个 `forward` 方法,包含要执行的推理代码。 这就是它在 agent 中使用所需的全部内容! 
还有另一种构建工具的方法。在 [guided_tour](../guided_tour) 中,我们使用 `@tool` 装饰器实现了一个工具。[`tool`] 装饰器是定义简单工具的推荐方式,但有时你需要更多:在类中使用多个方法以获得更清晰的代码,或使用额外的类属性。 在这种情况下,你可以通过如上所述继承 [`Tool`] 来构建你的工具。 ### 将你的工具分享到 Hub 你可以通过调用 [`~Tool.push_to_hub`] 将你的自定义工具分享到 Hub。确保你已经在 Hub 上为其创建了一个仓库,并且使用的是具有读取权限的 token。 ```python model_downloads_tool.push_to_hub("{your_username}/hf-model-downloads", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>") ``` 为了使推送到 Hub 正常工作,你的工具需要遵守一些规则: - 所有方法都是自包含的,例如使用来自其参数中的变量。 - 根据上述要点,**所有导入应直接在工具的函数中定义**,否则在尝试使用 [`~Tool.save`] 或 [`~Tool.push_to_hub`] 调用你的自定义工具时会出现错误。 - 如果你继承了 `__init__` 方法,除了 `self` 之外,你不能给它任何其他参数。这是因为在特定工具实例初始化期间设置的参数很难跟踪,这阻碍了将它们正确分享到 Hub。无论如何,创建特定类的想法是你已经可以为任何需要硬编码的内容设置类属性(只需在 `class YourTool(Tool):` 行下直接设置 `your_variable=(...)`)。当然,你仍然可以通过将内容分配给 `self.your_variable` 在代码中的任何地方创建类属性。 一旦你的工具被推送到 Hub,你就可以查看它。[这里](https://huggingface.co/spaces/m-ric/hf-model-downloads) 是我推送的 `model_downloads_tool`。它有一个漂亮的 gradio 界面。 在深入工具文件时,你可以发现所有工具的逻辑都在 [tool.py](https://huggingface.co/spaces/m-ric/hf-model-downloads/blob/main/tool.py) 下。这是你可以检查其他人分享的工具的地方。 然后你可以使用 [`load_tool`] 加载工具或使用 [`~Tool.from_hub`] 创建它,并将其传递给 agent 中的 `tools` 参数。 由于运行工具意味着运行自定义代码,你需要确保你信任该仓库,因此我们需要传递 `trust_remote_code=True` 来从 Hub 加载工具。 ```python from smolagents import load_tool, CodeAgent model_download_tool = load_tool( "{your_username}/hf-model-downloads", trust_remote_code=True ) ``` ### 将 Space 导入为工具 你可以使用 [`Tool.from_space`] 方法直接从 Hub 导入一个 Space 作为工具! 你只需要提供 Hub 上 Space 的 id、它的名称和一个帮助你的 agent 理解工具功能的描述。在底层,这将使用 [`gradio-client`](https://pypi.org/project/gradio-client/) 库来调用 Space。 例如,让我们从 Hub 导入 [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) Space 并使用它生成一张图片。 ```python image_generation_tool = Tool.from_space( "black-forest-labs/FLUX.1-schnell", name="image_generator", description="Generate an image from a prompt" ) image_generation_tool("A sunny beach") ``` 瞧,这是你的图片!🏖️ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sunny_beach.webp"> 然后你可以像使用任何其他工具一样使用这个工具。例如,让我们改进提示 `A rabbit wearing a space suit` 并生成它的图片。 ```python from smolagents import CodeAgent, HfApiModel model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A rabbit wearing a space suit'} ) ``` ```text === Agent thoughts: improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background" Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt. 
>>> Agent is executing the code below: image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background") final_answer(image) ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit_spacesuit_flux.webp"> 这得有多酷?🤩 ### 使用 LangChain 工具 我们喜欢 Langchain,并认为它有一套非常吸引人的工具。 要从 LangChain 导入工具,请使用 `from_langchain()` 方法。 以下是如何使用它来重现介绍中的搜索结果,使用 LangChain 的 web 搜索工具。 这个工具需要 `pip install langchain google-search-results -q` 才能正常工作。 ```python from langchain.agents import load_tools search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) agent = CodeAgent(tools=[search_tool], model=model) agent.run("How many more blocks (also denoted as layers) are in BERT base encoder compared to the encoder from the architecture proposed in Attention is All You Need?") ``` ### 管理你的 agent 工具箱 你可以通过添加或替换工具来管理 agent 的工具箱。 让我们将 `model_download_tool` 添加到一个仅使用默认工具箱初始化的现有 agent 中。 ```python from smolagents import HfApiModel model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.tools[model_download_tool.name] = model_download_tool ``` 现在我们可以利用新工具: ```python agent.run( "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub but reverse the letters?" ) ``` > [!TIP] > 注意不要向 agent 添加太多工具:这可能会让较弱的 LLM 引擎不堪重负。 ### 使用工具集合 你可以通过使用 ToolCollection 对象来利用工具集合,使用你想要使用的集合的 slug。 然后将它们作为列表传递给 agent 初始化,并开始使用它们! ```py from smolagents import ToolCollection, CodeAgent image_tool_collection = ToolCollection.from_hub( collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>" ) agent = CodeAgent(tools=[*image_tool_collection.tools], model=model, add_base_tools=True) agent.run("Please draw me a picture of rivers and lakes.") ``` 为了加快启动速度,工具仅在 agent 调用时加载。
smolagents/docs/source/zh/tutorials/tools.md/0
{ "file_path": "smolagents/docs/source/zh/tutorials/tools.md", "repo_id": "smolagents", "token_count": 5019 }
# Shamelessly stolen from Microsoft Autogen team: thanks to them for this great resource! # https://github.com/microsoft/autogen/blob/gaia_multiagent_v01_march_1st/autogen/browser_utils.py import copy from smolagents.models import MessageRole, Model def prepare_response(original_task: str, inner_messages, reformulation_model: Model) -> str: messages = [ { "role": MessageRole.SYSTEM, "content": [ { "type": "text", "text": f"""Earlier you were asked the following: {original_task} Your team then worked diligently to address that request. Read below a transcript of that conversation:""", } ], } ] # The first message just repeats the question, so remove it # if len(inner_messages) > 1: # del inner_messages[0] # copy them to this context try: for message in inner_messages: if not message.get("content"): continue message = copy.deepcopy(message) message["role"] = MessageRole.USER messages.append(message) except Exception: messages += [{"role": MessageRole.ASSISTANT, "content": str(inner_messages)}] # ask for the final answer messages.append( { "role": MessageRole.USER, "content": [ { "type": "text", "text": f""" Read the above conversation and output a FINAL ANSWER to the question. The question is repeated here for convenience: {original_task} To output the final answer, use the following template: FINAL ANSWER: [YOUR FINAL ANSWER] Your FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. ADDITIONALLY, your FINAL ANSWER MUST adhere to any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.) If you are asked for a number, express it numerically (i.e., with digits rather than words), don't use commas, and DO NOT INCLUDE UNITS such as $ or USD or percent signs unless specified otherwise. If you are asked for a string, don't use articles or abbreviations (e.g. for cities), unless specified otherwise. Don't output any final sentence punctuation such as '.', '!', or '?'. If you are asked for a comma separated list, apply the above rules depending on whether the elements are numbers or strings. If you are unable to determine the final answer, output 'FINAL ANSWER: Unable to determine' """, } ], } ) response = reformulation_model(messages).content final_answer = response.split("FINAL ANSWER: ")[-1].strip() print("> Reformulated answer: ", final_answer) # if "unable to determine" in final_answer.lower(): # messages.append({"role": MessageRole.ASSISTANT, "content": response }) # messages.append({"role": MessageRole.USER, "content": [{"type": "text", "text": """ # I understand that a definitive answer could not be determined. Please make a well-informed EDUCATED GUESS based on the conversation. # To output the educated guess, use the following template: EDUCATED GUESS: [YOUR EDUCATED GUESS] # Your EDUCATED GUESS should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. DO NOT OUTPUT 'I don't know', 'Unable to determine', etc. # ADDITIONALLY, your EDUCATED GUESS MUST adhere to any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.) # If you are asked for a number, express it numerically (i.e., with digits rather than words), don't use commas, and don't include units such as $ or percent signs unless specified otherwise. # If you are asked for a string, don't use articles or abbreviations (e.g. cit for cities), unless specified otherwise. 
Don't output any final sentence punctuation such as '.', '!', or '?'. # If you are asked for a comma separated list, apply the above rules depending on whether the elements are numbers or strings. # """.strip()}]}) # response = model(messages).content # print("\n>>>Making an educated guess.\n", response) # final_answer = response.split("EDUCATED GUESS: ")[-1].strip() return final_answer
smolagents/examples/open_deep_research/scripts/reformulator.py/0
{ "file_path": "smolagents/examples/open_deep_research/scripts/reformulator.py", "repo_id": "smolagents", "token_count": 1514 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import pickle import re import textwrap from io import BytesIO from typing import Any, List, Tuple from PIL import Image from .tool_validation import validate_tool_attributes from .tools import Tool from .utils import BASE_BUILTIN_MODULES, instance_to_source try: from dotenv import load_dotenv load_dotenv() except ModuleNotFoundError: pass class E2BExecutor: def __init__(self, additional_imports: List[str], tools: List[Tool], logger): self.logger = logger try: from e2b_code_interpreter import Sandbox except ModuleNotFoundError: raise ModuleNotFoundError( """Please install 'e2b' extra to use E2BExecutor: `pip install "smolagents[e2b]"`""" ) self.logger = logger self.logger.log("Initializing E2B executor, hold on...") self.custom_tools = {} self.final_answer = False self.final_answer_pattern = re.compile(r"final_answer\((.*?)\)") self.sbx = Sandbox() # "qywp2ctmu2q7jzprcf4j") # TODO: validate installing agents package or not # print("Installing agents package on remote executor...") # self.sbx.commands.run( # "pip install git+https://github.com/huggingface/smolagents.git", # timeout=300 # ) # print("Installation of agents package finished.") additional_imports = additional_imports + ["smolagents"] if len(additional_imports) > 0: execution = self.sbx.commands.run("pip install " + " ".join(additional_imports)) if execution.error: raise Exception(f"Error installing dependencies: {execution.error}") else: logger.log(f"Installation of {additional_imports} succeeded!", 0) tool_codes = [] for tool in tools: validate_tool_attributes(tool.__class__, check_imports=False) tool_code = instance_to_source(tool, base_cls=Tool) tool_code = tool_code.replace("from smolagents.tools import Tool", "") tool_code += f"\n{tool.name} = {tool.__class__.__name__}()\n" tool_codes.append(tool_code) tool_definition_code = "\n".join([f"import {module}" for module in BASE_BUILTIN_MODULES]) tool_definition_code += textwrap.dedent( """ class Tool: def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) def forward(self, *args, **kwargs): pass # to be implemented in child class """ ) tool_definition_code += "\n\n".join(tool_codes) tool_definition_execution = self.run_code_raise_errors(tool_definition_code) self.logger.log(tool_definition_execution.logs) def run_code_raise_errors(self, code: str): if self.final_answer_pattern.search(code) is not None: self.final_answer = True execution = self.sbx.run_code( code, ) if execution.error: execution_logs = "\n".join([str(log) for log in execution.logs.stdout]) logs = execution_logs logs += "Executing code yielded an error:" logs += execution.error.name logs += execution.error.value logs += execution.error.traceback raise ValueError(logs) return execution def __call__(self, code_action: str, additional_args: dict) -> Tuple[Any, Any]: if len(additional_args) > 0: # Pickle additional_args to server import tempfile with 
tempfile.NamedTemporaryFile() as f: pickle.dump(additional_args, f) f.flush() with open(f.name, "rb") as file: self.sbx.files.write("/home/state.pkl", file) remote_unloading_code = """import pickle import os print("File path", os.path.getsize('/home/state.pkl')) with open('/home/state.pkl', 'rb') as f: pickle_dict = pickle.load(f) locals().update({key: value for key, value in pickle_dict.items()}) """ execution = self.run_code_raise_errors(remote_unloading_code) execution_logs = "\n".join([str(log) for log in execution.logs.stdout]) self.logger.log(execution_logs, 1) execution = self.run_code_raise_errors(code_action) execution_logs = "\n".join([str(log) for log in execution.logs.stdout]) if not execution.results: return None, execution_logs, self.final_answer else: for result in execution.results: if result.is_main_result: for attribute_name in ["jpeg", "png"]: if getattr(result, attribute_name) is not None: image_output = getattr(result, attribute_name) decoded_bytes = base64.b64decode(image_output.encode("utf-8")) return Image.open(BytesIO(decoded_bytes)), execution_logs, self.final_answer for attribute_name in [ "chart", "data", "html", "javascript", "json", "latex", "markdown", "pdf", "svg", "text", ]: if getattr(result, attribute_name) is not None: return getattr(result, attribute_name), execution_logs, self.final_answer if self.final_answer: raise ValueError("No main result returned by executor!") return None, execution_logs, False __all__ = ["E2BExecutor"]
smolagents/src/smolagents/e2b_executor.py/0
{ "file_path": "smolagents/src/smolagents/e2b_executor.py", "repo_id": "smolagents", "token_count": 2958 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import os import re import shutil import subprocess import tempfile import traceback from pathlib import Path from typing import List import pytest from dotenv import load_dotenv class SubprocessCallException(Exception): pass def run_command(command: List[str], return_stdout=False, env=None): """ Runs command with subprocess.check_output and returns stdout if requested. Properly captures and handles errors during command execution. """ for i, c in enumerate(command): if isinstance(c, Path): command[i] = str(c) if env is None: env = os.environ.copy() try: output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env) if return_stdout: if hasattr(output, "decode"): output = output.decode("utf-8") return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" ) from e class DocCodeExtractor: """Handles extraction and validation of Python code from markdown files.""" @staticmethod def extract_python_code(content: str) -> List[str]: """Extract Python code blocks from markdown content.""" pattern = r"```(?:python|py)\n(.*?)\n```" matches = re.finditer(pattern, content, re.DOTALL) return [match.group(1).strip() for match in matches] @staticmethod def create_test_script(code_blocks: List[str], tmp_dir: str) -> Path: """Create a temporary Python script from code blocks.""" combined_code = "\n\n".join(code_blocks) assert len(combined_code) > 0, "Code is empty!" 
tmp_file = Path(tmp_dir) / "test_script.py" with open(tmp_file, "w", encoding="utf-8") as f: f.write(combined_code) return tmp_file @pytest.mark.skipif(not os.getenv("RUN_ALL"), reason="RUN_ALL environment variable not set") class TestDocs: """Test case for documentation code testing.""" @classmethod def setup_class(cls): cls._tmpdir = tempfile.mkdtemp() cls.launch_args = ["python3"] cls.docs_dir = Path(__file__).parent.parent / "docs" / "source" / "en" cls.extractor = DocCodeExtractor() if not cls.docs_dir.exists(): raise ValueError(f"Docs directory not found at {cls.docs_dir}") load_dotenv() cls.md_files = list(cls.docs_dir.rglob("*.md")) if not cls.md_files: raise ValueError(f"No markdown files found in {cls.docs_dir}") @classmethod def teardown_class(cls): shutil.rmtree(cls._tmpdir) @pytest.mark.timeout(100) def test_single_doc(self, doc_path: Path): """Test a single documentation file.""" with open(doc_path, "r", encoding="utf-8") as f: content = f.read() code_blocks = self.extractor.extract_python_code(content) excluded_snippets = [ "ToolCollection", "image_generation_tool", # We don't want to run this expensive operation "from_langchain", # Langchain is not a dependency "while llm_should_continue(memory):", # This is pseudo code "ollama_chat/llama3.2", # Exclude ollama building in guided tour "model = TransformersModel(model_id=model_id)", # Exclude testing with transformers model "SmolagentsInstrumentor", # Exclude telemetry since it needs additional installs ] code_blocks = [ block for block in code_blocks if not any( [snippet in block for snippet in excluded_snippets] ) # Exclude these tools that take longer to run and add dependencies ] if len(code_blocks) == 0: pytest.skip(f"No Python code blocks found in {doc_path.name}") # Validate syntax of each block individually by parsing it for i, block in enumerate(code_blocks, 1): ast.parse(block) # Create and execute test script print("\n\nCollected code block:==========\n".join(code_blocks)) try: code_blocks = [ ( block.replace("<YOUR_HUGGINGFACEHUB_API_TOKEN>", os.getenv("HF_TOKEN")) .replace("YOUR_ANTHROPIC_API_KEY", os.getenv("ANTHROPIC_API_KEY")) .replace("{your_username}", "m-ric") ) for block in code_blocks ] test_script = self.extractor.create_test_script(code_blocks, self._tmpdir) run_command(self.launch_args + [str(test_script)]) except SubprocessCallException as e: pytest.fail(f"\nError while testing {doc_path.name}:\n{str(e)}") except Exception: pytest.fail(f"\nUnexpected error while testing {doc_path.name}:\n{traceback.format_exc()}") @pytest.fixture(autouse=True) def _setup(self): """Fixture to ensure temporary directory exists for each test.""" os.makedirs(self._tmpdir, exist_ok=True) yield # Clean up test files after each test for file in Path(self._tmpdir).glob("*"): file.unlink() def pytest_generate_tests(metafunc): """Generate test cases for each markdown file.""" if "doc_path" in metafunc.fixturenames: test_class = metafunc.cls # Initialize the class if needed if not hasattr(test_class, "md_files"): test_class.setup_class() # Parameterize with the markdown files metafunc.parametrize("doc_path", test_class.md_files, ids=[f.stem for f in test_class.md_files])
smolagents/tests/test_all_docs.py/0
{ "file_path": "smolagents/tests/test_all_docs.py", "repo_id": "smolagents", "token_count": 2625 }
# This file instructs Redocly's linter to ignore the rules contained for specific parts of your API. # See https://redoc.ly/docs/cli/ for more information. docs/openapi.json: no-empty-servers: - '#/openapi' spec: - >- #/components/schemas/GenerateParameters/properties/best_of/exclusiveMinimum - >- #/components/schemas/GenerateParameters/properties/frequency_penalty/exclusiveMinimum - '#/components/schemas/GenerateParameters/properties/grammar/nullable' - >- #/components/schemas/GenerateParameters/properties/repetition_penalty/exclusiveMinimum - '#/components/schemas/GenerateParameters/properties/seed/exclusiveMinimum' - >- #/components/schemas/GenerateParameters/properties/temperature/exclusiveMinimum - '#/components/schemas/GenerateParameters/properties/top_k/exclusiveMinimum' - >- #/components/schemas/GenerateParameters/properties/top_n_tokens/exclusiveMinimum - '#/components/schemas/GenerateParameters/properties/top_p/exclusiveMinimum' - >- #/components/schemas/GenerateParameters/properties/typical_p/exclusiveMinimum - '#/components/schemas/GenerateResponse/properties/details/nullable' - '#/components/schemas/StreamResponse/properties/details/nullable' - '#/components/schemas/ChatRequest/properties/response_format/nullable' - '#/components/schemas/ChatRequest/properties/stream_options/nullable' - '#/components/schemas/ChatRequest/properties/tool_choice/nullable' - '#/components/schemas/ToolChoice/nullable' - '#/components/schemas/ChatCompletionComplete/properties/logprobs/nullable' - '#/components/schemas/ChatCompletionChunk/properties/usage/nullable' - '#/components/schemas/ChatCompletionChoice/properties/logprobs/nullable' no-invalid-media-type-examples: - '#/paths/~1/post/responses/422/content/application~1json/example' - '#/paths/~1/post/responses/424/content/application~1json/example' - '#/paths/~1/post/responses/429/content/application~1json/example' - '#/paths/~1/post/responses/500/content/application~1json/example' - '#/paths/~1generate/post/responses/422/content/application~1json/example' - '#/paths/~1generate/post/responses/424/content/application~1json/example' - '#/paths/~1generate/post/responses/429/content/application~1json/example' - '#/paths/~1generate/post/responses/500/content/application~1json/example' - >- #/paths/~1generate_stream/post/responses/422/content/text~1event-stream/example - >- #/paths/~1generate_stream/post/responses/424/content/text~1event-stream/example - >- #/paths/~1generate_stream/post/responses/429/content/text~1event-stream/example - >- #/paths/~1generate_stream/post/responses/500/content/text~1event-stream/example - '#/paths/~1tokenize/post/responses/404/content/application~1json/example' - >- #/paths/~1v1~1chat~1completions/post/responses/422/content/application~1json/example - >- #/paths/~1v1~1chat~1completions/post/responses/424/content/application~1json/example - >- #/paths/~1v1~1chat~1completions/post/responses/429/content/application~1json/example - >- #/paths/~1v1~1chat~1completions/post/responses/500/content/application~1json/example - >- #/paths/~1v1~1completions/post/responses/422/content/application~1json/example - >- #/paths/~1v1~1completions/post/responses/424/content/application~1json/example - >- #/paths/~1v1~1completions/post/responses/429/content/application~1json/example - >- #/paths/~1v1~1completions/post/responses/500/content/application~1json/example operation-4xx-response: - '#/paths/~1health/get/responses' - '#/paths/~1info/get/responses' - '#/paths/~1metrics/get/responses' no-unused-components: - 
'#/components/schemas/Completion' security-defined: - '#/paths/~1/post' - '#/paths/~1generate/post' - '#/paths/~1generate_stream/post' - '#/paths/~1health/get' - '#/paths/~1info/get' - '#/paths/~1metrics/get' - '#/paths/~1tokenize/post' - '#/paths/~1v1~1chat~1completions/post' - '#/paths/~1v1~1completions/post' - '#/paths/~1v1~1models/get'
text-generation-inference/.redocly.lint-ignore.yaml/0
{ "file_path": "text-generation-inference/.redocly.lint-ignore.yaml", "repo_id": "text-generation-inference", "token_count": 1750 }
// // Created by mfuntowicz on 11/16/24. // #include <catch2/catch_all.hpp> #include "../csrc/hardware.hpp" using namespace huggingface::tgi::hardware::cuda; TEST_CASE("is_at_least_<arch>") { const static auto VOLTA_CAPABILITIES = compute_capabilities_t(7, 0); REQUIRE(VOLTA_CAPABILITIES.is_at_least_volta()); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least_turing()); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least_ampere()); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least_ada_lovelace()); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least_hopper()); const static auto TURING_CAPABILITIES = compute_capabilities_t(7, 5); REQUIRE(TURING_CAPABILITIES.is_at_least_volta()); REQUIRE(TURING_CAPABILITIES.is_at_least_turing()); REQUIRE_FALSE(TURING_CAPABILITIES.is_at_least_ampere()); REQUIRE_FALSE(TURING_CAPABILITIES.is_at_least_ada_lovelace()); REQUIRE_FALSE(TURING_CAPABILITIES.is_at_least_hopper()); const static auto AMPERE_CAPABILITIES = compute_capabilities_t(8, 0); REQUIRE(AMPERE_CAPABILITIES.is_at_least_volta()); REQUIRE(AMPERE_CAPABILITIES.is_at_least_turing()); REQUIRE(AMPERE_CAPABILITIES.is_at_least_ampere()); REQUIRE_FALSE(AMPERE_CAPABILITIES.is_at_least_ada_lovelace()); REQUIRE_FALSE(AMPERE_CAPABILITIES.is_at_least_hopper()); const static auto ADA_LOVELACE_CAPABILITIES = compute_capabilities_t(8, 9); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least_volta()); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least_turing()); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least_ampere()); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least_ada_lovelace()); REQUIRE_FALSE(ADA_LOVELACE_CAPABILITIES.is_at_least_hopper()); const static auto HOPPER_CAPABILITIES = compute_capabilities_t(9, 0); REQUIRE(HOPPER_CAPABILITIES.is_at_least_volta()); REQUIRE(HOPPER_CAPABILITIES.is_at_least_turing()); REQUIRE(HOPPER_CAPABILITIES.is_at_least_ampere()); REQUIRE(HOPPER_CAPABILITIES.is_at_least_ada_lovelace()); REQUIRE(HOPPER_CAPABILITIES.is_at_least_hopper()); } TEST_CASE("is_at_least") { const static auto VOLTA_CAPABILITIES = compute_capabilities_t(7, 0); REQUIRE(VOLTA_CAPABILITIES.is_at_least(VOLTA)); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least(TURING)); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least(AMPERE)); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least(ADA_LOVELACE)); REQUIRE_FALSE(VOLTA_CAPABILITIES.is_at_least(HOPPER)); const static auto TURING_CAPABILITIES = compute_capabilities_t(7, 5); REQUIRE(TURING_CAPABILITIES.is_at_least(VOLTA)); REQUIRE(TURING_CAPABILITIES.is_at_least(TURING)); REQUIRE_FALSE(TURING_CAPABILITIES.is_at_least(AMPERE)); REQUIRE_FALSE(TURING_CAPABILITIES.is_at_least(ADA_LOVELACE)); REQUIRE_FALSE(TURING_CAPABILITIES.is_at_least(HOPPER)); const static auto AMPERE_CAPABILITIES = compute_capabilities_t(8, 0); REQUIRE(AMPERE_CAPABILITIES.is_at_least(VOLTA)); REQUIRE(AMPERE_CAPABILITIES.is_at_least(TURING)); REQUIRE(AMPERE_CAPABILITIES.is_at_least(AMPERE)); REQUIRE_FALSE(AMPERE_CAPABILITIES.is_at_least(ADA_LOVELACE)); REQUIRE_FALSE(AMPERE_CAPABILITIES.is_at_least(HOPPER)); const static auto ADA_LOVELACE_CAPABILITIES = compute_capabilities_t(8, 9); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least(VOLTA)); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least(TURING)); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least(AMPERE)); REQUIRE(ADA_LOVELACE_CAPABILITIES.is_at_least(ADA_LOVELACE)); REQUIRE_FALSE(ADA_LOVELACE_CAPABILITIES.is_at_least(HOPPER)); const static auto HOPPER_CAPABILITIES = compute_capabilities_t (9, 0); REQUIRE(HOPPER_CAPABILITIES.is_at_least(VOLTA)); REQUIRE(HOPPER_CAPABILITIES.is_at_least(TURING)); 
REQUIRE(HOPPER_CAPABILITIES.is_at_least(AMPERE)); REQUIRE(HOPPER_CAPABILITIES.is_at_least(ADA_LOVELACE)); REQUIRE(HOPPER_CAPABILITIES.is_at_least(HOPPER)); }
text-generation-inference/backends/trtllm/tests/test_hardware.cpp/0
{ "file_path": "text-generation-inference/backends/trtllm/tests/test_hardware.cpp", "repo_id": "text-generation-inference", "token_count": 1738 }
<html> <head> <!-- Load the latest Swagger UI code and style from npm using unpkg.com --> <script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script> <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@3/swagger-ui.css"/> <title>Text Generation Inference API</title> </head> <body> <div id="swagger-ui"></div> <!-- Div to hold the UI component --> <script> window.onload = function () { // Begin Swagger UI call region const ui = SwaggerUIBundle({ url: "openapi.json", //Location of Open API spec in the repo dom_id: '#swagger-ui', deepLinking: true, supportedSubmitMethods: [], presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], plugins: [ SwaggerUIBundle.plugins.DownloadUrl ], }) window.ui = ui } </script> </body> </html>
text-generation-inference/docs/index.html/0
{ "file_path": "text-generation-inference/docs/index.html", "repo_id": "text-generation-inference", "token_count": 653 }
# Using TGI with Nvidia GPUs

TGI optimized models are supported on NVIDIA [H100](https://www.nvidia.com/en-us/data-center/h100/), [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 12.2+. Note that you have to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it.

For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed.

TGI can be used on NVIDIA GPUs through its official docker image:

```bash
model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:3.1.0 \
    --model-id $model
```

The launched TGI server can then be queried from clients; make sure to check out the [Consuming TGI](./basic_tutorials/consuming_tgi) guide.
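For a quick sanity check, you can send a generation request to the running container with `curl` (a minimal example; the prompt and `max_new_tokens` value are placeholders to adapt to your needs):

```bash
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```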
text-generation-inference/docs/source/installation_nvidia.md/0
{ "file_path": "text-generation-inference/docs/source/installation_nvidia.md", "repo_id": "text-generation-inference", "token_count": 377 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 15, "logprob": null, "text": "," }, { "id": 1669, "logprob": -5.4453125, "text": " il" }, { "id": 11580, "logprob": -2.3378906, "text": " faut" }, { "id": 3913, "logprob": -4.3320312, "text": " tout" }, { "id": 39261, "logprob": -2.9160156, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 408, "logprob": -0.16687012, "special": false, "text": " que" }, { "id": 366, "logprob": -1.5517578, "special": false, "text": " la" }, { "id": 8769, "logprob": -0.16687012, "special": false, "text": " personne" }, { "id": 1479, "logprob": -2.1035156, "special": false, "text": " qui" }, { "id": 143926, "logprob": -2.8671875, "special": false, "text": " réalise" }, { "id": 578, "logprob": 0.0, "special": false, "text": " le" }, { "id": 8138, "logprob": -0.66748047, "special": false, "text": " projet" }, { "id": 795, "logprob": -1.6279297, "special": false, "text": " ne" }, { "id": 9802, "logprob": -0.47875977, "special": false, "text": " soit" }, { "id": 1230, "logprob": 0.0, "special": false, "text": " pas" } ], "top_tokens": null }, "generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui réalise le projet ne soit pas" }
text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json", "repo_id": "text-generation-inference", "token_count": 1204 }
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 4, "prefill": [], "seed": 0, "tokens": [ { "id": 2143, "logprob": -1.828125, "special": false, "text": " sent" }, { "id": 10081, "logprob": -0.41210938, "special": false, "text": " successfully" }, { "id": 13, "logprob": 0.0, "special": false, "text": "." }, { "id": 100001, "logprob": -0.16015625, "special": true, "text": "<|end▁of▁sentence|>" } ], "top_tokens": null }, "generated_text": "Test request sent successfully." }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2_all_params.json", "repo_id": "text-generation-inference", "token_count": 424 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 198, "logprob": -2.5742188, "special": false, "text": "\n" }, { "id": 262, "logprob": -1.6230469, "special": false, "text": " " }, { "id": 3270, "logprob": -2.046875, "special": false, "text": " \"\"\"\n" }, { "id": 262, "logprob": -0.015281677, "special": false, "text": " " }, { "id": 422, "logprob": -2.1425781, "special": false, "text": " if" }, { "id": 1715, "logprob": -0.9238281, "special": false, "text": " request" }, { "id": 13204, "logprob": -0.076660156, "special": false, "text": ".method" }, { "id": 624, "logprob": -0.021987915, "special": false, "text": " ==" }, { "id": 364, "logprob": -0.39208984, "special": false, "text": " '" }, { "id": 3019, "logprob": -0.10821533, "special": false, "text": "POST" } ], "top_tokens": null }, "generated_text": "\n \"\"\"\n if request.method == 'POST" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json", "repo_id": "text-generation-inference", "token_count": 883 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "prefill": [], "seed": null, "tokens": [ { "id": 108, "logprob": -0.73046875, "special": false, "text": "\n" }, { "id": 30234, "logprob": -2.328125, "special": false, "text": "Brown" }, { "id": 108, "logprob": -0.12060547, "special": false, "text": "\n" }, { "id": 3726, "logprob": -1.7734375, "special": false, "text": "Car" }, { "id": 108, "logprob": -0.041503906, "special": false, "text": "\n" }, { "id": 2915, "logprob": -1.796875, "special": false, "text": "Color" }, { "id": 108, "logprob": -0.039794922, "special": false, "text": "\n" }, { "id": 19178, "logprob": -1.96875, "special": false, "text": "Cool" }, { "id": 108, "logprob": -0.080566406, "special": false, "text": "\n" }, { "id": 40544, "logprob": -2.1875, "special": false, "text": "Decor" }, { "id": 108, "logprob": -0.033935547, "special": false, "text": "\n" }, { "id": 13936, "logprob": -1.6328125, "special": false, "text": "Green" }, { "id": 108, "logprob": -0.16210938, "special": false, "text": "\n" }, { "id": 955, "logprob": -2.015625, "special": false, "text": "..." }, { "id": 108, "logprob": -0.14746094, "special": false, "text": "\n" }, { "id": 955, "logprob": -0.73828125, "special": false, "text": "..." }, { "id": 108, "logprob": -0.051513672, "special": false, "text": "\n" }, { "id": 955, "logprob": -0.34765625, "special": false, "text": "..." }, { "id": 108, "logprob": -0.020141602, "special": false, "text": "\n" }, { "id": 955, "logprob": -0.11767578, "special": false, "text": "..." } ], "top_tokens": null }, "generated_text": "\nBrown\nCar\nColor\nCool\nDecor\nGreen\n...\n...\n...\n..." }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma2/test_flash_pali_gemma_image.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma2/test_flash_pali_gemma_image.json", "repo_id": "text-generation-inference", "token_count": 1632 }
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.83984375, "text": " is" }, { "id": 18147, "logprob": -12.8125, "text": " Deep" }, { "id": 20727, "logprob": -2.84375, "text": " Learning" }, { "id": 32, "logprob": -1.25, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.37890625, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.4296875, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.078125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.515625, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.6015625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.65625, "special": false, "text": " a" }, { "id": 747, "logprob": -2.109375, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.328125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0032653809, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.28125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json", "repo_id": "text-generation-inference", "token_count": 5458 }
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }
text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json", "repo_id": "text-generation-inference", "token_count": 680 }
import pytest @pytest.fixture(scope="module") def compressed_tensors_w8a8_int_handle(launcher): with launcher( "neuralmagic/Llama-3.2-3B-Instruct-quantized.w8a8", num_shard=2, quantize="compressed-tensors", ) as handle: yield handle @pytest.fixture(scope="module") async def compressed_tensors_w8a8_int(compressed_tensors_w8a8_int_handle): await compressed_tensors_w8a8_int_handle.health(300) return compressed_tensors_w8a8_int_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_w8a8_int( compressed_tensors_w8a8_int, response_snapshot ): response = await compressed_tensors_w8a8_int.generate( "What is deep learning?", max_new_tokens=10, decoder_input_details=True, ) assert ( response.generated_text == " and how does it differ from traditional machine learning?\n" ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_w8a8_int_all_params( compressed_tensors_w8a8_int, response_snapshot ): response = await compressed_tensors_w8a8_int.generate( "What is deep learning", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert ( response.generated_text == "What is deep learning?\nDeep learning, also known as neural network or" ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_w8a8_int_load( compressed_tensors_w8a8_int, generate_load, response_snapshot ): responses = await generate_load( compressed_tensors_w8a8_int, "What is deep learning?", max_new_tokens=10, n=4, ) assert ( responses[0].generated_text == " and how does it differ from traditional machine learning?\n" ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_compressed_tensors_w8a8_int.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_compressed_tensors_w8a8_int.py", "repo_id": "text-generation-inference", "token_count": 1071 }
import pytest @pytest.fixture(scope="module") def flash_llama_exl2_handle(launcher): with launcher( "turboderp/Llama-3-8B-Instruct-exl2", revision="2.5bpw", # Set max input length to avoid OOM due to extremely large # scratch buffer. max_input_length=1024, num_shard=1, quantize="exl2", ) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_exl2(flash_llama_exl2_handle): await flash_llama_exl2_handle.health(300) return flash_llama_exl2_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_exl2(flash_llama_exl2, ignore_logprob_response_snapshot): response = await flash_llama_exl2.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == ignore_logprob_response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_exl2_all_params( flash_llama_exl2, ignore_logprob_response_snapshot ): response = await flash_llama_exl2.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert ( response.generated_text == 'Test request. The server responds with a "200 OK"' ) assert response == ignore_logprob_response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_exl2_load( flash_llama_exl2, generate_load, ignore_logprob_response_snapshot ): responses = await generate_load( flash_llama_exl2, "Test request", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == ignore_logprob_response_snapshot
text-generation-inference/integration-tests/models/test_flash_llama_exl2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_llama_exl2.py", "repo_id": "text-generation-inference", "token_count": 886 }
import pytest @pytest.fixture(scope="module") def flash_pali_gemma_handle(launcher): with launcher( "google/paligemma2-3b-pt-224", ) as handle: yield handle @pytest.fixture(scope="module") async def flash_pali_gemma(flash_pali_gemma_handle): await flash_pali_gemma_handle.health(300) return flash_pali_gemma_handle.client async def test_flash_pali_gemma_image(flash_pali_gemma, response_snapshot): car_image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" response = await flash_pali_gemma.generate( f"![]({car_image})", max_new_tokens=20, ) assert ( response.generated_text == "\nBrown\nCar\nColor\nCool\nDecor\nGreen\n...\n...\n...\n..." ) assert response == response_snapshot
text-generation-inference/integration-tests/models/test_flash_pali_gemma2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_pali_gemma2.py", "repo_id": "text-generation-inference", "token_count": 361 }
import pytest import requests @pytest.fixture(scope="module") def lora_mistral_handle(launcher): with launcher( "mistralai/Mistral-7B-v0.1", lora_adapters=[ "predibase/dbpedia", "predibase/customer_support", ], cuda_graphs=[0], ) as handle: yield handle @pytest.fixture(scope="module") async def lora_mistral(lora_mistral_handle): await lora_mistral_handle.health(300) return lora_mistral_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_lora_mistral(lora_mistral, response_snapshot): response = await lora_mistral.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 classification_prompt = """You are given the title and the body of an article below. Please determine the type of the article.\n### Title: Great White Whale\n\n### Body: Great White Whale is the debut album by the Canadian rock band Secret and Whisper. The album was in the works for about a year and was released on February 12 2008. A music video was shot in Pittsburgh for the album's first single XOXOXO. The album reached number 17 on iTunes's top 100 albums in its first week on sale.\n\n### Article Type:""" @pytest.mark.asyncio @pytest.mark.private async def test_lora_mistral_without_adapter(lora_mistral, response_snapshot): response = requests.post( f"{lora_mistral.base_url}/generate", headers=lora_mistral.headers, json={ "inputs": classification_prompt, "parameters": { "max_new_tokens": 40, "details": True, }, }, ) assert response.status_code == 200 data = response.json() assert ( data["generated_text"] == "\n\n### 1. News\n### 2. Blog\n### 3. Article\n### 4. Review\n### 5. Other\n\n\n\n\n\n\n\n\n" ) assert data == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_lora_mistral_with_dbpedia_adapter(lora_mistral, response_snapshot): response = requests.post( f"{lora_mistral.base_url}/generate", headers=lora_mistral.headers, json={ "inputs": classification_prompt, "parameters": { "max_new_tokens": 40, "adapter_id": "predibase/dbpedia", "details": True, }, }, ) assert response.status_code == 200 data = response.json() assert data["generated_text"] == " 11" assert data == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_lora_mistral_with_customer_support_adapter( lora_mistral, response_snapshot ): print(lora_mistral.base_url) print(lora_mistral.headers) response = requests.post( f"{lora_mistral.base_url}/generate", headers=lora_mistral.headers, json={ "inputs": "What are 3 unique words that describe you?", "parameters": { "max_new_tokens": 40, "adapter_id": "predibase/customer_support", "details": True, }, }, ) assert response.status_code == 200 data = response.json() assert ( data["generated_text"] == "\n\nI’m not sure if I can come up with 3 unique words that describe me, but I’ll try.\n\n1. Creative\n2. Funny\n3." ) assert data == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_lora_mistral_without_customer_support_adapter( lora_mistral, response_snapshot ): response = requests.post( f"{lora_mistral.base_url}/generate", headers=lora_mistral.headers, json={ "inputs": "What are 3 unique words that describe you?", "parameters": { "max_new_tokens": 40, "details": True, }, }, ) assert response.status_code == 200 data = response.json() assert ( data["generated_text"] == "\n\nI’m a very passionate person. I’m very driven. I’m very determined.\n\nWhat is your favorite thing about being a teacher?\n\nI love the fact" ) assert data == response_snapshot
text-generation-inference/integration-tests/models/test_lora_mistral.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_lora_mistral.py", "repo_id": "text-generation-inference", "token_count": 1873 }
{ dockerTools, cacert, text-generation-inference, stream ? false, }: let build = if stream then dockerTools.streamLayeredImage else dockerTools.buildLayeredImage; in build { name = "tgi-docker"; tag = "latest"; config = { EntryPoint = [ "${text-generation-inference}/bin/text-generation-inference" ]; Env = [ "HF_HOME=/data" "PORT=80" ]; }; contents = [ cacert ]; }
text-generation-inference/nix/docker.nix/0
{ "file_path": "text-generation-inference/nix/docker.nix", "repo_id": "text-generation-inference", "token_count": 160 }
use crate::infer::Infer; use crate::server::{chat_completions, compat_generate, completions, ComputeType}; use crate::{ ChatCompletion, ChatCompletionChunk, ChatRequest, Chunk, CompatGenerateRequest, CompletionFinal, CompletionRequest, ErrorResponse, GenerateResponse, Info, StreamResponse, }; use axum::extract::Extension; use axum::http::StatusCode; use axum::response::Response; use axum::Json; use serde::{Deserialize, Serialize}; use tracing::instrument; use utoipa::ToSchema; #[derive(Clone, Deserialize, ToSchema)] #[serde(untagged)] pub(crate) enum SagemakerRequest { Generate(CompatGenerateRequest), Chat(ChatRequest), Completion(CompletionRequest), } // Used for OpenAPI specs #[allow(dead_code)] #[derive(Serialize, ToSchema)] #[serde(untagged)] pub(crate) enum SagemakerResponse { Generate(GenerateResponse), Chat(ChatCompletion), Completion(CompletionFinal), } // Used for OpenAPI specs #[allow(dead_code)] #[derive(Serialize, ToSchema)] #[serde(untagged)] pub(crate) enum SagemakerStreamResponse { Generate(StreamResponse), Chat(ChatCompletionChunk), Completion(Chunk), } /// Generate tokens from Sagemaker request #[utoipa::path( post, tag = "Text Generation Inference", path = "/invocations", request_body = SagemakerRequest, responses( (status = 200, description = "Generated Chat Completion", content( ("application/json" = SagemakerResponse), ("text/event-stream" = SagemakerStreamResponse), )), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation", "error_type": "generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded", "error_type": "overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error", "error_type": "validation"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation", "error_type": "incomplete_generation"})), ) )] #[instrument(skip_all)] pub(crate) async fn sagemaker_compatibility( default_return_full_text: Extension<bool>, infer: Extension<Infer>, compute_type: Extension<ComputeType>, info: Extension<Info>, Json(req): Json<SagemakerRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { match req { SagemakerRequest::Generate(req) => { compat_generate(default_return_full_text, infer, compute_type, Json(req)).await } SagemakerRequest::Chat(req) => chat_completions(infer, compute_type, info, Json(req)).await, SagemakerRequest::Completion(req) => { completions(infer, compute_type, info, Json(req)).await } } }
text-generation-inference/router/src/sagemaker.rs/0
{ "file_path": "text-generation-inference/router/src/sagemaker.rs", "repo_id": "text-generation-inference", "token_count": 962 }
selective_scan_commit := 2a3704fd47ba817b415627b06fd796b971fdc137

causal-conv1d:
	rm -rf causal-conv1d
	git clone https://github.com/Dao-AILab/causal-conv1d.git

build-causal-conv1d: causal-conv1d
	cd causal-conv1d/ && git checkout v1.1.1 # known latest working version tag
	cd causal-conv1d/ && CAUSAL_CONV1D_FORCE_BUILD=TRUE python setup.py build

install-causal-conv1d: build-causal-conv1d
	pip uninstall causal-conv1d -y || true
	cd causal-conv1d/ && pip install .

# selective-scan depends on causal-conv1d
selective-scan:
	rm -rf mamba
	git clone https://github.com/state-spaces/mamba.git mamba

build-selective-scan: selective-scan
	cd mamba/ && git fetch && git checkout $(selective_scan_commit)
	cd mamba && python setup.py build

install-selective-scan: install-causal-conv1d build-selective-scan
	pip uninstall selective-scan-cuda -y || true
	cd mamba && pip install .

build-all: build-causal-conv1d build-selective-scan
text-generation-inference/server/Makefile-selective-scan/0
{ "file_path": "text-generation-inference/server/Makefile-selective-scan", "repo_id": "text-generation-inference", "token_count": 351 }
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #include <torch/extension.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAContext.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include "util.cuh" #include "tuning.h" #include "cuda_buffers.cuh" #include "cuda_func/q4_matrix.cuh" #include "cuda_func/q4_matmul.cuh" #include "cuda_func/column_remap.cuh" // Check CUDA return code. We don't want to include Torch headers in the .cu files because parsing them adds almost a // minute to the compile time on a 12900K. Also passing exceptions back to Python is super tricky, so in place of // exceptions, CUDA functions return with a cudaError_t which we can parse and dump to the console. void check_cuda(cudaError_t ret) { switch (ret) { case cudaSuccess: break; case cudaUnspecified: printf(" **** Unspecified error\n"); TORCH_CHECK(false, "CUDA error"); break; default: printf(" **** CUDA error\n"); \ printf(" **** %s\n", cudaGetErrorString(ret)); \ TORCH_CHECK(false, "CUDA error"); \ break; } } // Some decluttering macros #define STRINGIFY_(__x) #__x #define STRINGIFY(__x) STRINGIFY_(__x) #define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype) #define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype) #define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes") #define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes") #define TORCH_CHECK_SHAPE_MOD(__x, __dim_x, __mod) TORCH_CHECK((__x).size(__dim_x) % __mod == 0, #__x ".shape[" STRINGIFY(__dim_x) "] must be a multiple of " STRINGIFY(__mod)) #define TORCH_CHECK_DEVICE_INDEX(__index) \ do { \ TORCH_CHECK(__index >= 0, "no device index"); \ TORCH_CHECK(__index < CUDA_MAX_DEVICES, "invalid device index"); \ } while(0) #define TORCH_CHECK_QUANT(__w, __w_scales, __w_zeros, __seq_g_idx, __x_map) \ do { \ TORCH_CHECK_DTYPE(__w, kInt); \ TORCH_CHECK_DTYPE(__w_scales, kHalf); \ TORCH_CHECK_DTYPE(__w_zeros, kInt); \ TORCH_CHECK_DTYPE_OPT(__seq_g_idx, kShort); \ TORCH_CHECK_DTYPE_OPT(__x_map, kInt); \ TORCH_CHECK_SHAPES_OPT(__seq_g_idx, 0, __w, 0, 2 * 8); \ TORCH_CHECK_SHAPES_OPT(__x_map, 0, __w, 0, 8); \ } while(0) int get_groupsize(torch::Tensor w, torch::Tensor w_zeros) { int groupsize = w.size(0) * 8 / w_zeros.size(0); TORCH_CHECK(groupsize * w_zeros.size(0) == w.size(0) * 8, "w.shape[-2] must be a multiple of zeros.shape[-2]") return groupsize; } // Tuning parameters ExLlamaTuning tuningParams; void set_tuning_params ( int matmul_recons_thd, bool matmul_fused_remap, bool matmul_no_half2 ) { tuningParams.matmul_recons_thd = matmul_recons_thd; tuningParams.matmul_fused_remap = matmul_fused_remap; tuningParams.matmul_no_half2 = matmul_no_half2; } // Release all unmanaged objects allocated by the extension void cleanup() { cleanup_buffers_cuda(); g_q4_free_matrices(); } // Prepare buffers for forward pass void prepare_buffers ( torch::Device device, torch::Tensor temp_state, torch::Tensor temp_dq ) { int device_index = device.index(); TORCH_CHECK_DEVICE_INDEX(device_index); const at::cuda::OptionalCUDAGuard device_guard(device); 
prepare_buffers_cuda ( device_index, (half*) temp_state.data_ptr(), (half*) temp_dq.data_ptr() ); } // Create Q4Matrix, return handle uintptr_t make_q4 ( torch::Tensor qweight, torch::Tensor qzeros, torch::Tensor scales, torch::Tensor g_idx, int device ) { TORCH_CHECK_DTYPE(qweight, kInt); TORCH_CHECK_DTYPE(qzeros, kInt); TORCH_CHECK_DTYPE(scales, kHalf); TORCH_CHECK_DTYPE_OPT(g_idx, kInt); TORCH_CHECK_SHAPES(qweight, 1, qzeros, 1, 8); TORCH_CHECK_SHAPES(scales, 1, qweight, 1, 1); TORCH_CHECK_SHAPES(qzeros, 0, scales, 0, 1); int width = qweight.size(1); int height = qweight.size(0) * 8; int groups = qzeros.size(0); Q4Matrix* m = new Q4Matrix ( height, width, groups, (uint32_t*) qweight.data_ptr(), (uint32_t*) qzeros.data_ptr(), (half*) scales.data_ptr(), g_idx.device().is_meta() ? NULL : (uint32_t*) g_idx.data_ptr(), device ); g_q4_keep_matrix(m); return reinterpret_cast<uintptr_t> (m); } // Matmul half @ quant -> half void q4_matmul ( torch::Tensor x, uintptr_t w, torch::Tensor out ) { Q4Matrix* wm = reinterpret_cast<Q4Matrix*> (w); TORCH_CHECK_DTYPE(x, kHalf); TORCH_CHECK_DTYPE(out, kHalf); TORCH_CHECK_SHAPES(x, 0, out, 0, 1); TORCH_CHECK(wm->height == x.size(-1), "x and w have incompatible shapes") const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); int x_height = x.size(0); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (tuningParams.matmul_recons_thd == 0 || x_height < tuningParams.matmul_recons_thd) { q4_matmul_cuda ( &tuningParams, (half*) x.data_ptr(), x_height, wm, (half*) out.data_ptr(), false, stream ); } else { q4_matmul_recons_cuda ( &tuningParams, (half*) x.data_ptr(), x_height, wm, (half*) out.data_ptr(), false, at::cuda::getCurrentCUDABlasHandle() ); } } // Remap columns in half tensor void column_remap ( torch::Tensor x, torch::Tensor x_new, torch::Tensor x_map ) { TORCH_CHECK_DTYPE(x, kHalf); TORCH_CHECK_DTYPE(x_new, kHalf); TORCH_CHECK_DTYPE(x_map, kInt); TORCH_CHECK_SHAPES(x_map, 0, x, 1, 1); int height = x.size(0); int width = x.size(1); const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); column_remap_cuda ( (half*) x.data_ptr(), (half*) x_new.data_ptr(), height, width, (uint32_t*) x_map.data_ptr() ); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("set_tuning_params", &set_tuning_params, "set_tuning_params"); m.def("prepare_buffers", &prepare_buffers, "prepare_buffers"); m.def("cleanup", &cleanup, "cleanup"); m.def("make_q4", &make_q4, "make_q4"); m.def("q4_matmul", &q4_matmul, "q4_matmul"); }
text-generation-inference/server/exllama_kernels/exllama_kernels/exllama_ext.cpp/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/exllama_ext.cpp", "repo_id": "text-generation-inference", "token_count": 3279 }
#ifndef _qdq_2_cuh #define _qdq_2_cuh #include "qdq_util.cuh" #include "../../config.h" #if QMODE_2BIT == 1 // Permutation: // // ffddbb99 77553311 eeccaa88 66442200 __forceinline__ __device__ void shuffle_2bit_16 ( uint32_t* q, int stride ) { uint32_t qa = q[0]; uint32_t qb = 0; #pragma unroll for (int i = 0; i < 8; i++) { uint32_t qa0 = qa & 0x03; uint32_t qa1 = (qa & 0x0c) >> 2; qa >>= 4; qb |= (qa1 << (i * 2 + 16)); qb |= (qa0 << (i * 2)); } q[0] = qb; } __forceinline__ __device__ void dequant_2bit_16 ( const uint32_t q_0, half2 (&dq)[8], int stride ) { const uint32_t c0 = 0x64006400; const half y4_ = __float2half_rn(1.0f / 4.0f); const half y16_ = __float2half_rn(1.0f / 16.0f); const half y64_ = __float2half_rn(1.0f / 64.0f); const half2 y4 = __halves2half2(y4_, y4_); const half2 y16 = __halves2half2(y16_, y16_); const half2 y64 = __halves2half2(y64_, y64_); const half z1_ = __float2half_rn(-1024.0f - 2.0f); const half z4_ = __float2half_rn(-1024.0f / 4.0f - 2.0f); const half z16_ = __float2half_rn(-1024.0f / 16.0f - 2.0f); const half z64_ = __float2half_rn(-1024.0f / 64.0f - 2.0f); const half2 z1 = __halves2half2(z1_, z1_); const half2 z4 = __halves2half2(z4_, z4_); const half2 z16 = __halves2half2(z16_, z16_); const half2 z64 = __halves2half2(z64_, z64_); uint32_t qa = q_0; half2_uint32 q0((qa & 0x00030003) | c0); // half2(q[ 0], q[ 1]) + 1024 half2_uint32 q1((qa & 0x000c000c) | c0); // half2(q[ 2], q[ 3]) * 4 + 1024 half2_uint32 q2((qa & 0x00300030) | c0); // half2(q[ 4], q[ 5]) * 16 + 1024 half2_uint32 q3((qa & 0x00c000c0) | c0); // half2(q[ 6], q[ 7]) * 64 + 1024 qa >>= 8; half2_uint32 q4((qa & 0x00030003) | c0); // half2(q[ 8], q[ 8]) + 1024 half2_uint32 q5((qa & 0x000c000c) | c0); // half2(q[10], q[11]) * 4 + 1024 half2_uint32 q6((qa & 0x00300030) | c0); // half2(q[12], q[13]) * 16 + 1024 half2_uint32 q7((qa & 0x00c000c0) | c0); // half2(q[14], q[15]) * 64 + 1024 dq[0] = __hadd2(q0.as_half2, z1); dq[1] = __hfma2(q1.as_half2, y4, z4); dq[2] = __hfma2(q2.as_half2, y16, z16); dq[3] = __hfma2(q3.as_half2, y64, z64); dq[4] = __hadd2(q4.as_half2, z1); dq[5] = __hfma2(q5.as_half2, y4, z4); dq[6] = __hfma2(q6.as_half2, y16, z16); dq[7] = __hfma2(q7.as_half2, y64, z64); } #else __forceinline__ __device__ void shuffle_2bit_16 ( uint32_t* q, int stride ) { } __forceinline__ __device__ void dequant_2bit_16 ( const uint32_t q_0, half2 (&dq)[8], int stride ) { half dqh[16]; for (int i = 0; i < 16; i++) dqh[i] = dq_ns(exb(q_0, i * 2, 0x03), 2); for (int i = 0; i < 8; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]); } #endif #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_2.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_2.cuh", "repo_id": "text-generation-inference", "token_count": 1589 }
import pytest import os from text_generation_server.pb import generate_pb2 os.environ["PREFIX_CACHING"] = "1" os.environ["ATTENTION"] = "flashinfer" @pytest.fixture def default_pb_parameters(): return generate_pb2.NextTokenChooserParameters( temperature=1.0, repetition_penalty=1.0, top_k=0, top_p=1.0, typical_p=1.0, do_sample=False, ) @pytest.fixture def default_pb_stop_parameters(): return generate_pb2.StoppingCriteriaParameters(stop_sequences=[], max_new_tokens=10)
text-generation-inference/server/tests/conftest.py/0
{ "file_path": "text-generation-inference/server/tests/conftest.py", "repo_id": "text-generation-inference", "token_count": 235 }
# Origin: https://github.com/predibase/lorax # Path: lorax/server/lorax_server/adapters/lora.py # License: Apache License Version 2.0, January 2004 from collections import defaultdict from dataclasses import dataclass from typing import Dict, List, Optional, Set, Tuple, Type, Union from loguru import logger import torch from peft import LoraConfig as _LoraConfig from torch.distributed import ProcessGroup from text_generation_server.utils.log import log_master from text_generation_server.adapters.config import AdapterConfig, ModuleMap from text_generation_server.adapters.weights import ( AdapterBatchMetadata, AdapterWeights, BatchAdapterWeights, ) from text_generation_server.utils.sgmv import ( BGMV_MAX_RANK, MAX_RANK_CUSTOM, get_tmp_tensors, orient_for_rank, pad_rank, use_cutlass_shrink, has_sgmv, ) def get_start_stop_idxs_for_rank(offset, size, rank, world_size): block_size = size // world_size start = offset + rank * block_size stop = offset + (rank + 1) * block_size return start, stop def shard_on_dim( t: torch.Tensor, dim: int, process_group: torch.distributed.ProcessGroup ): world_size = process_group.size() rank = process_group.rank() size = t.shape[dim] start, stop = get_start_stop_idxs_for_rank(0, size, rank, world_size) if dim == 0: tensor = t[start:stop] elif dim == 1: tensor = t[:, start:stop] else: raise NotImplementedError("Let's make that generic when needed") return tensor def shard_lora_weights( weights_a: List[torch.Tensor], weights_b: List[torch.Tensor], split_dim: int, process_group: ProcessGroup, ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: # [hidden_size, r] weights_a = [ shard_on_dim(w, dim=split_dim, process_group=process_group) for w in weights_a ] # [r, hidden_size] weights_b = [shard_on_dim(w, dim=1, process_group=process_group) for w in weights_b] return weights_a, weights_b @dataclass class LoraConfig(AdapterConfig): r: int target_modules: Optional[Union[List[str], str]] fan_in_fan_out: bool lora_alpha: int use_rslora: bool def map_weights_for_model( self, adapter_weights: Dict[int, AdapterWeights], weight_names: Tuple[str], ) -> Tuple[ModuleMap, Set[str]]: adapter_weight_names = set() module_map = {} for weight_name in weight_names: lora_a_name = f"base_model.model.{weight_name}.lora_A.weight" lora_b_name = f"base_model.model.{weight_name}.lora_B.weight" if lora_a_name not in adapter_weights or lora_b_name not in adapter_weights: continue module_map[weight_name] = { "lora_A": (adapter_weights[lora_a_name], lora_a_name), "lora_B": (adapter_weights[lora_b_name], lora_b_name), } adapter_weight_names.add(lora_a_name) adapter_weight_names.add(lora_b_name) return module_map, adapter_weight_names @classmethod def load(cls, adapter_id: str, api_token: str) -> "LoraConfig": hf_config = _LoraConfig.from_pretrained(adapter_id, token=api_token) return cls( base_model_name_or_path=hf_config.base_model_name_or_path, r=hf_config.r, target_modules=hf_config.target_modules, fan_in_fan_out=hf_config.fan_in_fan_out, lora_alpha=hf_config.lora_alpha, use_rslora=( hf_config.use_rslora if hasattr(hf_config, "use_rslora") else False ), ) class LoraWeights(AdapterWeights): """LoRA weights for a single adapter merged across all layers.""" def __init__( self, weights_a: List[torch.Tensor], weights_b: List[torch.Tensor], adapter_config: LoraConfig, ): self.lora_a_r = weights_a[0].size(1) if len(weights_a) > 0 else 1 self.lora_b_r = weights_b[0].size(0) if len(weights_a) > 0 else 1 self._use_cutlass_shrink = use_cutlass_shrink(self.lora_a_r) self._is_transposed = False # 
[num_layers, hidden_size, r] weights_a = [orient_for_rank(w, w.size(1)).contiguous() for w in weights_a] self._weights_a = torch.stack(weights_a) # [num_layers, r, hidden_size] self._weights_b = torch.stack(weights_b) self.adapter_config = adapter_config @property def weights_a(self) -> torch.Tensor: if self._is_transposed: self._transpose_weights() return self._weights_a @property def weights_b(self) -> torch.Tensor: if self._is_transposed: self._transpose_weights() return self._weights_b @property def weights_a_t(self) -> torch.Tensor: if not self._is_transposed: self._transpose_weights() return self._weights_a @property def weights_b_t(self) -> torch.Tensor: if not self._is_transposed: self._transpose_weights() return self._weights_b def _transpose_weights(self): if self._use_cutlass_shrink: # If we're not using the cutlass shrink, then both SGMV and BGMV use the same orientation self._weights_a = self._weights_a.transpose(1, 2).contiguous() self._weights_b = self._weights_b.transpose(1, 2).contiguous() self._is_transposed = not self._is_transposed @classmethod def get_batch_types(cls) -> List[Type[BatchAdapterWeights]]: return [BatchLoraWeights] # prepare pre-loaded lora weights for use in the model. # # this method processes and organizes lora weights for a specific layer type across all layers: # - uses `config` (LoraConfig) to apply lora-specific settings like scaling factor. # - retrieves weights from `module_map` based on the `layer_type`. # - processes `nlayers` number of layers. # - converts weights to the specified `dtype`. # - shards weights across `world_size` number of processes using the `process_group`. # - maps weights to specific layers using `target_to_layer`. # - tracks `unused_weight_names` to identify any unused weights. # # the method handles weight transposition, scaling, and padding to ensure compatibility # with SGMV or BGMV operations. 
@classmethod def prepare_weights( cls, config: LoraConfig, module_map: Dict[str, Dict], layer_type: str, unused_weight_names: Set[str], nlayers: int, dtype: torch.dtype, world_size: int, process_group: ProcessGroup, target_to_layer: Dict[str, Tuple[str, torch.Tensor]], ) -> Optional[AdapterWeights]: lora_a_list = [None] * nlayers lora_b_list = [None] * nlayers # import ipdb; ipdb.set_trace() for layer_id in range(nlayers): key = (layer_id, layer_type) if key not in target_to_layer: # There is no layer of this type in the model log_master( logger.warning, f"Key specified in lora weights but not found in base model: {key}", ) return None weight_name, layer = target_to_layer[key] base_weight = layer.base_layer.linear.weight base_device = base_weight.device if weight_name not in module_map: # There is no LoRA weight for this layer type in the adapter return None lora_a, lora_a_name = module_map[weight_name]["lora_A"] lora_a = lora_a.to(base_device, dtype) lora_b, lora_b_name = module_map[weight_name]["lora_B"] lora_b = lora_b.to(base_device, dtype) scale = get_scaling_factor( config.lora_alpha, config.r, uses_rslora=config.use_rslora, ) unused_weight_names.discard(lora_a_name) unused_weight_names.discard(lora_b_name) # Merge scaling factor into lora_b due to associativity of matrix multiplication: # (A * B) * C = A * (B * C) lora_a_list[layer_id] = lora_a.transpose(0, 1) lora_b_list[layer_id] = lora_b.transpose(0, 1) * scale # pad lora ranks to be compatible with sgmv lora_a_list = [pad_rank(w, dim=1, world_size=world_size) for w in lora_a_list] lora_b_list = [pad_rank(w, dim=0, world_size=world_size) for w in lora_b_list] if lora_a_list: # update rank if it was padded padded_rank = lora_a_list[0].size(1) config.r = padded_rank return LoraWeights( *shard_lora_weights( weights_a=lora_a_list, weights_b=lora_b_list, split_dim=0 if layer_type in {"o_proj", "down_proj", "lm_head"} else 1, process_group=process_group, ), config, ) @dataclass class RankSegments: rank: int lora_a_ptr: torch.Tensor lora_b_ptr: torch.Tensor # prefill (sgmv) tmp_shrink: torch.Tensor tmp_expand: torch.Tensor segment_starts: torch.Tensor segment_ends: torch.Tensor # decode (bgmv) indices: torch.Tensor @dataclass class BatchLoraWeights(BatchAdapterWeights): lora_a: Dict[int, torch.Tensor] lora_b: Dict[int, torch.Tensor] adapter_index_configs: Dict[int, LoraConfig] rank_data: Dict[int, RankSegments] use_sgmv: bool def has_adapter(self, adapter_index: int) -> bool: return adapter_index in self.adapter_index_configs def can_vectorize(self, pg: ProcessGroup) -> bool: return all( rank_data.rank // pg.size() <= MAX_RANK_CUSTOM for rank_data in self.rank_data.values() ) @classmethod def load( self, adapter_weights: Dict[int, AdapterWeights], meta: AdapterBatchMetadata, prefill: bool, prefill_head_indices: Optional[torch.Tensor], ) -> Optional["BatchLoraWeights"]: adapter_weights = {k: _convert_lora(v) for k, v in adapter_weights.items()} adapter_weights = { k: v for k, v in adapter_weights.items() if isinstance(v, LoraWeights) } if not adapter_weights: return None first_weights = next(iter(adapter_weights.values())) device = first_weights.weights_a.device segment_indices = meta.segment_indices lora_a = { idx: adapter_weights[idx].weights_a for idx in segment_indices if idx in adapter_weights } lora_b = { idx: adapter_weights[idx].weights_b for idx in segment_indices if idx in adapter_weights } max_rank = max( ( adapter_weights[idx].lora_a_r for idx in segment_indices if idx in adapter_weights ), default=0, ) use_sgmv = False if 
prefill or max_rank > BGMV_MAX_RANK: if has_sgmv(): use_sgmv = True lora_a_ptr = torch.tensor( [ ( adapter_weights[idx].weights_a.data_ptr() if idx in adapter_weights else 0 ) for idx in segment_indices ], dtype=torch.int64, device=device, ) lora_b_ptr = torch.tensor( [ ( adapter_weights[idx].weights_b.data_ptr() if idx in adapter_weights else 0 ) for idx in segment_indices ], dtype=torch.int64, device=device, ) else: lora_a_ptr = torch.tensor( [ ( adapter_weights[idx].weights_a_t.data_ptr() if idx in adapter_weights else 0 ) for idx in segment_indices ], dtype=torch.int64, device=device, ) lora_b_ptr = torch.tensor( [ ( adapter_weights[idx].weights_b_t.data_ptr() if idx in adapter_weights else 0 ) for idx in segment_indices ], dtype=torch.int64, device=device, ) adapter_index_configs = { idx: adapter_weights[idx].adapter_config for idx in segment_indices if idx in adapter_weights } adapter_to_segment = {v: k for k, v in enumerate(segment_indices)} rank_indices = defaultdict(list) for segment_idx, adapter_idx in enumerate(segment_indices): if adapter_idx not in adapter_weights: continue rank_indices[adapter_weights[adapter_idx].lora_a_r].append(segment_idx) if prefill_head_indices is not None: j, prefill_head_segment_starts, prefill_head_segment_ends = 1, [0], [0] for head_index in prefill_head_indices: # j cannot go out of bounds as that would mean there are tokens without corresponding adapters if head_index < meta.adapter_segments[j]: prefill_head_segment_ends[-1] += 1 else: prefill_head_segment_starts.append(prefill_head_segment_ends[-1]) prefill_head_segment_ends.append(prefill_head_segment_ends[-1] + 1) j += 1 rank_data = {} for rank, indices in rank_indices.items(): tmp_shrink = None tmp_expand = None segment_starts = None segment_ends = None batch_indices = None if use_sgmv: lora_a_ptr_indices = lora_a_ptr[indices] tmp_shrink, tmp_expand = get_tmp_tensors( lora_a_ptr_indices.size(0), rank, device ) segment_starts = meta.adapter_segments[indices] segment_ends = meta.adapter_segments[[i + 1 for i in indices]] if prefill_head_indices is not None: for i, segment_index in enumerate(indices): segment_starts[i] = prefill_head_segment_starts[segment_index] segment_ends[i] = prefill_head_segment_ends[segment_index] else: rank_indices = set(indices) batch_indices = [ adapter_to_segment[idx] for idx in meta.adapter_indices.tolist() ] batch_indices = [ idx if idx in rank_indices else -1 for idx in batch_indices ] batch_indices = torch.tensor( batch_indices, dtype=torch.int64, device=device ) rank_data[rank] = RankSegments( rank=rank, tmp_shrink=tmp_shrink, tmp_expand=tmp_expand, lora_a_ptr=lora_a_ptr[indices], lora_b_ptr=lora_b_ptr[indices], segment_starts=segment_starts, segment_ends=segment_ends, indices=batch_indices, ) return BatchLoraWeights( lora_a=lora_a, lora_b=lora_b, adapter_index_configs=adapter_index_configs, rank_data=rank_data, use_sgmv=use_sgmv, ) def get_scaling_factor( lora_alpha: int, r: int, uses_rslora: bool = False, ) -> float: """Computes the scaling factor for the lora weights.""" if uses_rslora: return lora_alpha / (r**0.5) return lora_alpha / r def _convert_lora(v: AdapterWeights) -> AdapterWeights: if hasattr(v, "lora_weights"): return v.lora_weights return v
text-generation-inference/server/text_generation_server/adapters/lora.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/adapters/lora.py", "repo_id": "text-generation-inference", "token_count": 8265 }
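As a quick sanity check on the loader above (not part of TGI itself), the snippet below re-derives the scaling factor from get_scaling_factor and verifies the associativity identity that lets prepare_weights fold the scale into lora_B; all shapes and variable names here are made up for illustration.

# Minimal, self-contained sketch of the two facts the LoRA loader relies on:
# the scaling factor formula and scale-folding into lora_B via associativity.
import torch

def scaling_factor(lora_alpha: int, r: int, use_rslora: bool = False) -> float:
    # Mirrors get_scaling_factor above: alpha / sqrt(r) for rsLoRA, alpha / r otherwise.
    return lora_alpha / (r**0.5) if use_rslora else lora_alpha / r

torch.manual_seed(0)
hidden, r = 16, 4
x = torch.randn(2, hidden)
lora_a = torch.randn(hidden, r)
lora_b = torch.randn(r, hidden)
s = scaling_factor(lora_alpha=8, r=r)           # classic LoRA: 8 / 4 = 2.0

delta_folded = (x @ lora_a) @ (lora_b * s)      # scale folded into B, as prepare_weights does
delta_reference = s * ((x @ lora_a) @ lora_b)   # textbook formulation
assert torch.allclose(delta_folded, delta_reference, atol=1e-5)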
# Copied logic from https://github.com/mit-han-lab/llm-awq/blob/f084f40bd996f3cf3a0633c1ad7d9d476c318aaa/awq/quantize/qmodule.py from typing import Optional import torch import torch.nn as nn import awq_inference_engine # with CUDA kernels # class ScaledActivation(nn.Module): # def __init__(self, module, scales): # super().__init__() # self.act = module # self.scales = nn.Parameter(scales.data) # # def forward(self, x): # return self.act(x) / self.scales.view(1, 1, -1).to(x.device) class WQLinear(nn.Module): def __init__( self, w_bit, group_size, qweight, qzeros, scales, bias: Optional[torch.Tensor] ): super().__init__() if w_bit not in [4]: raise NotImplementedError("Only 4-bit are supported for now.") self.in_features = qweight.shape[0] self.out_features = qweight.shape[1] * 32 // w_bit self.w_bit = w_bit self.group_size = group_size if group_size != -1 else self.in_features # quick sanity check (make sure aligment) assert self.in_features % self.group_size == 0 assert self.out_features % (32 // self.w_bit) == 0 self.qweight = qweight self.qzeros = qzeros self.scales = scales self.bias = bias @torch.no_grad() def forward(self, x): out_shape = x.shape[:-1] + (self.out_features,) out = awq_inference_engine.gemm_forward_cuda( x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, 8 ) out = out + self.bias if self.bias is not None else out return out.reshape(out_shape)
text-generation-inference/server/text_generation_server/layers/awq/quantize/cuda.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/awq/quantize/cuda.py", "repo_id": "text-generation-inference", "token_count": 750 }
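The out_features = qweight.shape[1] * 32 // w_bit arithmetic in WQLinear comes from 4-bit values being packed eight to an int32. A toy unpacking sketch (plain PyTorch, ignoring AWQ's real interleaved packing order and its CUDA kernel) is:

# With w_bit = 4, every int32 column of qweight stores 8 quantized weights.
import torch

w_bit = 4
vals_per_int32 = 32 // w_bit                       # 8
packed = torch.tensor([0x76543210], dtype=torch.int32)

shifts = torch.arange(vals_per_int32, dtype=torch.int32) * w_bit
unpacked = (packed.unsqueeze(-1) >> shifts) & 0xF  # extract one nibble per position
print(unpacked.tolist())                           # [[0, 1, 2, 3, 4, 5, 6, 7]]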
# Adapted from turboderp exllama: https://github.com/turboderp/exllamav2 from dataclasses import dataclass from typing import Optional import torch import torch.nn as nn from loguru import logger from text_generation_server.layers.exl2 import Exl2Weight from text_generation_server.layers.gptq import GPTQWeight from text_generation_server.utils.log import log_master try: from exllamav2.ext import exllamav2_ext make_q_matrix = exllamav2_ext.make_q_matrix gemm_half_q_half = exllamav2_ext.gemm_half_q_half except ImportError: log_master(logger.warning, "exllamav2_kernels not installed.") raise # Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension none_tensor = torch.empty((1, 1), device="meta") @dataclass class _ExtraTensors: """Additional generated quantizer tensors.""" q_group_map: Optional[torch.Tensor] = None q_invperm: Optional[torch.Tensor] = None q_perm: Optional[torch.Tensor] = None def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda): """Matrix multiplication, returns x @ q4""" output_shape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device) gemm_half_q_half(x, q_handle, output, force_cuda) return output.view(output_shape) def make_group_map(q_groups: torch.Tensor, num_qrows: int): gr = q_groups.tolist() group_map = [] num_groups = len(gr) // 2 for i in range(num_groups): bits = gr[i * 2] if i < num_groups - 1: qrows = gr[i * 2 + 3] - gr[i * 2 + 1] else: qrows = num_qrows - gr[i * 2 + 1] rows = qrows * 32 // bits for j in range(rows): group_map += [i] group_map += [rows - j] return torch.tensor(group_map, dtype=torch.short, device=q_groups.device) # Create Q matrix def ext_make_q_matrix( w: Exl2Weight | GPTQWeight, extra: _ExtraTensors, temp_dq, key: Optional[str] = None, ): """ Create Q matrix """ # max_dq_size = 512*(1024**2) # max_dq_rows = max_dq_size // out_features[0] max_dq_rows = 0 # EXL2 if isinstance(w, Exl2Weight): extra.q_group_map = make_group_map(w.q_groups, w.q_weight.shape[0]) extra.q_perm = torch.argsort(w.q_invperm).short() return make_q_matrix( w.q_weight, extra.q_perm, w.q_invperm, w.q_scale, w.q_scale_max, w.q_groups, extra.q_group_map, none_tensor, # zeros none_tensor, # scales none_tensor, # g_idx none_tensor, # bias temp_dq, max_dq_rows, ) # GPTQ elif isinstance(w, GPTQWeight): if w.scales.dtype == torch.float: w.scales = w.scales.half() # GPTQ with g_idx (act_order) if w.g_idx is not None and not (w.g_idx == 0).all().item(): extra.q_perm = torch.empty( (w.qweight.shape[0] * 8,), dtype=torch.short, device=w.qweight.device, ) extra.q_invperm = torch.empty_like(extra.q_perm) # make_q4 segfaults if g_idx is not on cpu in the act-order case. In the non act-order case, None needs to be passed for g_idx. 
return make_q_matrix( w.qweight, extra.q_perm, extra.q_invperm, none_tensor, # q_scale none_tensor, # q_scale_max none_tensor, # q_groups none_tensor, # q_group_map w.qzeros, w.scales, w.g_idx.cpu(), none_tensor, # bias temp_dq, max_dq_rows, ) # GPTQ without g_idx else: return make_q_matrix( w.qweight, none_tensor, # q_perm none_tensor, # q_invperm none_tensor, # q_scale none_tensor, # q_scale_max none_tensor, # q_groups none_tensor, # q_group_map w.qzeros, w.scales, none_tensor, # g_idx none_tensor, # bias temp_dq, max_dq_rows, ) else: RuntimeError("Cannot create handle") DEVICE = None LAYERS = [] def set_device(device): global DEVICE DEVICE = device def create_exllama_buffers(max_total_tokens: int): global LAYERS, DEVICE # No need to initialize scratch space if there are no layers # that use ExLLamav2. if len(LAYERS) == 0: return # Find the size of the scratch space. scratch_bytes = max( layer.scratch_space_fixed(max_input_len=max_total_tokens, max_batch_size=1) for layer in LAYERS ) temp_dq = ExLlamaV2DeviceTensors(DEVICE, scratch_bytes) for layer in LAYERS: layer.post_init(temp_dq) class QuantLinear(nn.Module): QUANT_TYPE = "exllamav2" """Linear layer implementation with per-group 4-bit quantization of the weights""" def __init__( self, weight: Exl2Weight | GPTQWeight, bias: torch.Tensor, ): super().__init__() self.q_handle = None self.q_tensors = weight self.extra_tensors = _ExtraTensors() if isinstance(weight, Exl2Weight): self.infeatures = weight.q_invperm.shape[0] self.outfeatures = weight.q_weight.shape[1] elif isinstance(weight, GPTQWeight): if weight.bits != 4: raise ValueError( f"Exllamav2 kernel supports only bits=4, requested bits={weight.bits}. Something is wrong in the model initialization." ) self.infeatures = weight.qweight.shape[0] // weight.bits * 32 self.outfeatures = weight.qweight.shape[1] self.padding = -self.outfeatures % 32 self.outfeatures = self.outfeatures + self.padding self.device = weight.device self.bias = bias if bias is not None else None global LAYERS LAYERS.append(self) def post_init(self, temp_dq): device = self.q_tensors.device assert device.type == "cuda" assert device.index is not None temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size()) # We NEED to keep a pointer on Python side, otherwise the garbage collector will mess with us, # and `Memory access fault by GPU node-2` will EAT you. self.temp_dq = temp_dq self.q_handle = ext_make_q_matrix(self.q_tensors, self.extra_tensors, temp_dq) def forward(self, x, force_cuda=False): output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda) if self.bias is not None: output.add_(self.bias) return output def temp_dq_size(self): return self.infeatures * self.outfeatures * 2 + 128 def temp_fwd_size(self, max_input_len, max_batch_size): return self.outfeatures * max_input_len * max_batch_size * 4 + 128 def scratch_space_fixed(self, max_input_len, max_batch_size): return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size) class ExLlamaV2DeviceTensors: device_idx: int scratch_bytes: int scratch_idx: int scratch: torch.tensor = None def __init__(self, device, scratch_bytes): self.device = device self.scratch_bytes = scratch_bytes def prepare(self): self.scratch = torch.empty( (self.scratch_bytes // 2,), dtype=torch.half, device=self.device ) def get_scratch_slice(self, size_bytes): if self.scratch is None: self.prepare() size_bytes = ((size_bytes + 127) // 128) * 128 size_half = size_bytes // 2 scratch_slice = self.scratch.narrow(0, 0, size_half) return scratch_slice
text-generation-inference/server/text_generation_server/layers/gptq/exllamav2.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/gptq/exllamav2.py", "repo_id": "text-generation-inference", "token_count": 3935 }
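To make the scratch-space bookkeeping above concrete, here is a plain-Python restatement of temp_dq_size, temp_fwd_size and the 128-byte rounding used by get_scratch_slice; the 4096x4096 example numbers are illustrative only, not measurements.

# Rough sketch (pure Python, no extension) of how the exllamav2 scratch buffer is sized:
# space to hold the dequantized fp16 weight plus space for forward temporaries,
# with every slice rounded up to a multiple of 128 bytes.
def temp_dq_size(infeatures: int, outfeatures: int) -> int:
    return infeatures * outfeatures * 2 + 128          # fp16 dequantized weight

def temp_fwd_size(outfeatures: int, max_input_len: int, max_batch_size: int) -> int:
    return outfeatures * max_input_len * max_batch_size * 4 + 128

def round_to_128(size_bytes: int) -> int:
    # Same rounding as ExLlamaV2DeviceTensors.get_scratch_slice
    return ((size_bytes + 127) // 128) * 128

# e.g. a 4096x4096 projection with a 4k context and batch size 1:
print(temp_dq_size(4096, 4096))                      # 33_554_560 bytes (~32 MiB)
print(round_to_128(temp_fwd_size(4096, 4096, 1)))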
from typing import Optional import torch import torch.nn as nn from text_generation_server.utils.weights import Weights from text_generation_server.layers.fp8 import ( Fp8Weight, fp8_quantize, quant_dtype, normalize_e4m3fn_to_native_float8, ) try: from moe_kernels.fused_moe import fused_moe except Exception: fused_moe = None class FP8SparseMoELayer(nn.Module): def __init__( self, *, n_expert_group: Optional[int], n_experts: int, prefix: str, renormalize: bool, topk: int, topk_group: Optional[int], weights: Weights, scoring_func: Optional[str] = "softmax", e_score_correction_bias: Optional[float] = None, gate_proj_name: str = "gate_proj", up_proj_name: str = "up_proj", down_proj_name: str = "down_proj", ): super().__init__() assert (n_expert_group is None) == ( topk_group is None ), "n_expert_group and topk_group must both be None or have some value" self.n_expert_group = n_expert_group self.topk = topk self.topk_group = topk_group self.renormalize = renormalize self.weight_block_size = weights.weights_loader.weight_block_size self.scoring_func = scoring_func self.e_score_correction_bias = e_score_correction_bias ( self.gate_up_proj, self.gate_up_proj_weight_scale, self.gate_up_proj_input_scale, ) = _load_expert_multi_weights_col( prefix=prefix, n_experts=n_experts, gate_proj_name=gate_proj_name, up_proj_name=up_proj_name, weights=weights, ) self.down_proj, self.down_proj_weight_scale, self.down_proj_input_scale = ( _load_expert_weights_row( prefix=prefix, n_experts=n_experts, name=down_proj_name, weights=weights, ) ) def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor: return fused_moe( x, w1=self.gate_up_proj, w2=self.down_proj, gating_output=gating_output, topk=self.topk, renormalize=self.renormalize, inplace=True, use_grouped_topk=self.n_expert_group is not None, num_expert_group=self.n_expert_group, topk_group=self.topk_group, scoring_func=self.scoring_func, e_score_correction_bias=self.e_score_correction_bias, use_fp8_w8a8=True, w1_scale=self.gate_up_proj_weight_scale, w2_scale=self.down_proj_weight_scale, a1_scale=self.gate_up_proj_input_scale, a2_scale=self.down_proj_input_scale, ) def _load_expert_weights( get_weight_fn, *, prefix: str, n_experts: int, name: str, weights: Weights, ) -> torch.Tensor: all_weight = None all_weight_scales = None max_input_scale = None for i in range(n_experts): weight = get_weight_fn(prefix, i, name, weights) assert isinstance(weight, Fp8Weight) if all_weight is None: all_weight = torch.empty( (n_experts,) + weight.weight.shape, dtype=quant_dtype, device=weight.weight.device, ) if all_weight_scales is None: all_weight_scales = torch.empty( (n_experts,) + weight.weight_scale.shape, dtype=torch.float32, device=weight.weight.device, ) if weight.weight.dtype in {torch.float8_e4m3fn, torch.float8_e4m3fnuz}: all_weight[i], all_weight_scales[i], current_input_scale = ( normalize_e4m3fn_to_native_float8( weight.weight, weight.weight_scale, weight.input_scale ) ) if current_input_scale is not None: if max_input_scale is None or current_input_scale > max_input_scale: max_input_scale = current_input_scale else: all_weight[i], all_weight_scales[i] = fp8_quantize( weight.weight, scalar=True ) assert all_weight is not None return all_weight, all_weight_scales, max_input_scale def _load_expert_multi_weights_col( *, prefix: str, n_experts: int, gate_proj_name: str, up_proj_name: str, weights: Weights, ) -> torch.Tensor: def get_weight_fn(prefix, i, name, weights): return weights.get_multi_weights_col( [f"{prefix}.{i}.{gate_proj_name}", 
f"{prefix}.{i}.{up_proj_name}"], 0 ) return _load_expert_weights( get_weight_fn, prefix=prefix, n_experts=n_experts, name=None, weights=weights ) def _load_expert_weights_row( *, prefix: str, n_experts: int, name: str, weights: Weights, ) -> torch.Tensor: def get_weight_fn(prefix, i, name, weights): return weights.get_weights_row(f"{prefix}.{i}.{name}") return _load_expert_weights( get_weight_fn, prefix=prefix, n_experts=n_experts, name=name, weights=weights )
text-generation-inference/server/text_generation_server/layers/moe/fp8.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/moe/fp8.py", "repo_id": "text-generation-inference", "token_count": 2692 }
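A minimal sketch of the layout _load_expert_weights builds: per-expert matrices are quantized and stacked along a new leading experts dimension so the fused MoE kernel can index experts on dim 0. The absmax recipe below is a stand-in, not necessarily identical to TGI's fp8_quantize, and it assumes a PyTorch build with torch.float8_e4m3fn support.

import torch

def absmax_quantize_fp8(w: torch.Tensor):
    # Per-tensor absmax scaling into the e4m3 representable range.
    finfo = torch.finfo(torch.float8_e4m3fn)
    scale = w.abs().max().clamp(min=1e-12) / finfo.max
    q = (w / scale).clamp(finfo.min, finfo.max).to(torch.float8_e4m3fn)
    return q, scale.to(torch.float32)

n_experts, out_features, in_features = 4, 8, 16
experts = [torch.randn(out_features, in_features) for _ in range(n_experts)]

all_weight = torch.empty(n_experts, out_features, in_features, dtype=torch.float8_e4m3fn)
all_scales = torch.empty(n_experts, dtype=torch.float32)
for i, w in enumerate(experts):
    all_weight[i], all_scales[i] = absmax_quantize_fp8(w)

# Dequantize expert 0 and inspect the round-trip error.
recovered = all_weight[0].to(torch.float32) * all_scales[0]
print((recovered - experts[0]).abs().max())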
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Idefics2 model.""" from typing import List, Optional, Tuple import torch import torch.utils.checkpoint from torch import nn import math from transformers.activations import ACT2FN from text_generation_server.models.custom_modeling.vlm import ( load_text_model, ) from text_generation_server.layers.attention import Seqlen from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, ) from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand( batch, num_key_value_heads, n_rep, slen, head_dim ) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class Idefics2VisionEmbeddings(nn.Module): """ This is a modified version of `siglip.modelign_siglip.SiglipVisionEmbeddings` to enable images of variable resolution. The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304) which allows treating images in their native aspect ratio and without the need to resize them to the same fixed size. In particular, we start from the original pre-trained SigLIP model (which uses images of fixed-size square images) and adapt it by training on images of variable resolutions. 
""" def __init__(self, prefix, config, weights): super().__init__() self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", ) self.patch_embedding.weight = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.weight"), requires_grad=False ) self.patch_embedding.bias = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.bias"), requires_grad=False ) self.num_patches_per_side = self.image_size // self.patch_size self.num_patches = self.num_patches_per_side**2 self.num_positions = self.num_patches self.position_embedding = TensorParallelEmbedding( prefix=f"{prefix}.position_embedding", weights=weights ) def forward( self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor ) -> torch.Tensor: batch_size, _, max_im_h, max_im_w = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) max_nb_patches_h, max_nb_patches_w = ( max_im_h // self.patch_size, max_im_w // self.patch_size, ) boundaries = torch.arange( 1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side ) position_ids = torch.full( size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0 ) for batch_idx, p_attn_mask in enumerate(patch_attention_mask): nb_patches_h = p_attn_mask[:, 0].sum() nb_patches_w = p_attn_mask[0].sum() fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h) fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w) bucket_coords_h = torch.bucketize( fractional_coords_h, boundaries, right=True ) bucket_coords_w = torch.bucketize( fractional_coords_w, boundaries, right=True ) pos_ids = ( bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w ).flatten() position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids position_ids = position_ids.to(self.position_embedding.weight.device) embeddings = embeddings + self.position_embedding(position_ids) return embeddings class Idefics2VisionAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_size = self.embed_dim // self.num_heads if self.head_size * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_size**-0.5 self.dropout = config.attention_dropout self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.qkv = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=True, ) self.out_proj = TensorParallelRowLinear.load( config=config, prefix=f"{prefix}.out_proj", weights=weights, bias=True ) self.is_causal = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, q_len, _ = hidden_states.size() qkv = self.qkv(hidden_states) query_states, key_states, value_states = qkv.split( [ self.head_size * self.num_heads, self.head_size * self.num_heads, self.head_size * self.num_heads, ], dim=2, ) query_states = query_states.view( batch_size, q_len, self.num_heads, self.head_size ).transpose(1, 2) key_states = key_states.view( batch_size, q_len, self.num_heads, self.head_size ).transpose(1, 2) value_states = value_states.view( batch_size, q_len, self.num_heads, self.head_size ).transpose(1, 2) k_v_seq_len = key_states.shape[-2] attn_weights = ( torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale ) if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len): raise ValueError( f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(query_states.dtype) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_size): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_size)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output class Idefics2VisionMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load( prefix=f"{prefix}.fc1", config=config, weights=weights, bias=True ) self.fc2 = TensorParallelRowLinear.load( prefix=f"{prefix}.fc2", config=config, weights=weights, bias=True ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Idefics2EncoderLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = Idefics2VisionAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.layer_norm1 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm1", eps=config.layer_norm_eps, weights=weights ) self.layer_norm2 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm2", eps=config.layer_norm_eps, weights=weights ) self.mlp = 
Idefics2VisionMLP( prefix=f"{prefix}.mlp", config=config, weights=weights ) # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, ) -> torch.Tensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class Idefics2Encoder(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.layers = nn.ModuleList( [ Idefics2EncoderLayer( prefix=f"{prefix}.layers.{i}", config=config, weights=weights ) for i in range(config.num_hidden_layers) ] ) # Ignore copy def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, ): hidden_states = inputs_embeds for encoder_layer in self.layers: hidden_states = encoder_layer( hidden_states, attention_mask, ) return hidden_states class Idefics2VisionTransformer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embeddings = Idefics2VisionEmbeddings( prefix=f"{prefix}.embeddings", config=config, weights=weights ) self.encoder = Idefics2Encoder( prefix=f"{prefix}.encoder", config=config, weights=weights ) self.post_layernorm = nn.LayerNorm.load( prefix=f"{prefix}.post_layernorm", weights=weights, eps=config.layer_norm_eps, ) def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, ): batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_size = self.config.patch_size patch_attention_mask = torch.ones( ( batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size, ) ) patch_attention_mask = patch_attention_mask.to( dtype=torch.bool, device=pixel_values.device ) hidden_states = self.embeddings( pixel_values=pixel_values, patch_attention_mask=patch_attention_mask ) patch_attention_mask = patch_attention_mask.view(batch_size, -1) # The call to `_upad_input` in `_flash_attention_forward` is expensive # So when the `patch_attention_mask` is full of 1s (i.e. 
attending to the whole sequence), # avoiding passing the attention_mask, which is equivalent to attending to the full sequence if not torch.any(~patch_attention_mask): patch_attention_mask = None else: patch_attention_mask = _prepare_4d_attention_mask( patch_attention_mask, hidden_states.dtype ) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, ) last_hidden_state = encoder_outputs last_hidden_state = self.post_layernorm(last_hidden_state) return last_hidden_state class Idefics2MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.text_config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) self.gate_up_proj = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"], weights=weights, dim=0, bias=False, ) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=False, ) def forward(self, hidden_states): start_shape = hidden_states.shape[:-1] gate_up_states = self.gate_up_proj(hidden_states) intermediate_size = gate_up_states.shape[-1] // 2 gate_up_states = gate_up_states.view(-1, 2, intermediate_size) return self.down_proj( self.act(gate_up_states[:, 0]) * gate_up_states[:, 1] ).view(*start_shape, -1) class Idefics2RMSNorm(nn.Module): def __init__(self, prefix, weights, eps): """ Idefics2RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter( weights.get_tensor(f"{prefix}.weight"), requires_grad=False ) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class Idefics2PerceiverAttention(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.layer_idx = None self.hidden_size = config.text_config.hidden_size self.num_heads = config.perceiver_config.resampler_n_heads self.head_size = config.perceiver_config.resampler_head_dim self.num_key_value_heads = config.perceiver_config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.attention_dropout = config.perceiver_config.attention_dropout self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( self.num_key_value_heads // weights.process_group.size() ) self.q_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q_proj", weights=weights, bias=False, ) self.kv = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, ) self.o_proj = TensorParallelRowLinear.load( config=config, prefix=f"{prefix}.o_proj", weights=weights, bias=False ) self.is_causal = False def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = latents.size() kv_seq_len = q_len + context.size()[1] hidden_states = torch.concat([context, latents], dim=-2) query_states = self.q_proj(latents) kv = self.kv(hidden_states) key_states, value_states = kv.split( [ self.head_size * self.num_key_value_heads, self.head_size * 
self.num_key_value_heads, ], dim=2, ) query_states = query_states.view( bsz, q_len, self.num_heads, self.head_size ).transpose(1, 2) key_states = key_states.view( bsz, kv_seq_len, self.num_key_value_heads, self.head_size ).transpose(1, 2) value_states = value_states.view( bsz, kv_seq_len, self.num_key_value_heads, self.head_size ).transpose(1, 2) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul( query_states, key_states.transpose(2, 3) ) / math.sqrt(self.head_size) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_size): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_size)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_size) attn_output = self.o_proj(attn_output) return attn_output class Idefics2PerceiverLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.hidden_size = config.text_config.hidden_size self.n_latents = config.perceiver_config.resampler_n_latents self.depth = config.perceiver_config.resampler_depth self.rms_norm_eps = config.text_config.rms_norm_eps self.input_latents_norm = Idefics2RMSNorm( prefix=f"{prefix}.input_latents_norm", weights=weights, eps=self.rms_norm_eps, ) self.input_context_norm = Idefics2RMSNorm( prefix=f"{prefix}.input_context_norm", weights=weights, eps=self.rms_norm_eps, ) self.self_attn = Idefics2PerceiverAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.post_attention_layernorm = Idefics2RMSNorm( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=self.rms_norm_eps, ) self.mlp = Idefics2MLP(prefix=f"{prefix}.mlp", config=config, weights=weights) def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ): """ Args: latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. 
""" residual = latents latents = self.input_latents_norm(latents) context = self.input_context_norm(context) latents = self.self_attn( latents=latents, context=context, attention_mask=attention_mask, ) latents = residual + latents residual = latents latents = self.post_attention_layernorm(latents) latents = self.mlp(latents) latents = residual + latents return latents class Idefics2PerceiverResampler(nn.Module): def __init__(self, prefix, config, weights) -> None: super().__init__() self.hidden_size = config.text_config.hidden_size self.hidden_act = config.perceiver_config.hidden_act self.n_latents = config.perceiver_config.resampler_n_latents self.depth = config.perceiver_config.resampler_depth self.rms_norm_eps = config.text_config.rms_norm_eps # Create Latents for Perceiver self.latents = weights.get_tensor(f"{prefix}.latents") # Create Transformer Blocks self.layers = nn.ModuleList( [ Idefics2PerceiverLayer( prefix=f"{prefix}.layers.{idx}", config=config, weights=weights ) for idx in range(self.depth) ] ) self.norm = Idefics2RMSNorm( prefix=f"{prefix}.norm", weights=weights, eps=config.text_config.rms_norm_eps, ) def forward( self, context: torch.Tensor, attention_mask, ) -> torch.Tensor: # seq embed -> bsz seq embed latents = self.latents.unsqueeze(0).expand( (context.shape[0], *self.latents.size()) ) latent_attention_mask = torch.ones( (attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device, ) attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1) attention_mask = _prepare_4d_attention_mask( attention_mask, latents.dtype, tgt_len=self.n_latents ) compressed_context = latents for perceiver_layer in self.layers: compressed_context = perceiver_layer( compressed_context, context, attention_mask=attention_mask, ) compressed_context = self.norm(compressed_context) return compressed_context class Idefics2Connector(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.modality_projection = Idefics2MLP( prefix=f"{prefix}.modality_projection", config=config, weights=weights ) self.perceiver_resampler = Idefics2PerceiverResampler( prefix=f"{prefix}.perceiver_resampler", config=config, weights=weights ) def forward(self, image_hidden_states, attention_mask): image_hidden_states = self.modality_projection(image_hidden_states) image_hidden_states = self.perceiver_resampler( context=image_hidden_states, attention_mask=attention_mask ) return image_hidden_states class Idefics2ForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() config.vision_config.quantize = None config.vision_config.speculator = config.speculator config.text_config.quantize = config.quantize config.text_config.speculator = config.speculator vision_config = config.vision_config self.text_model = load_text_model( prefix="model" if not prefix else f"{prefix}.model", config=config.text_config, weights=weights, name="text_model", ) self.dtype = weights.dtype # The vision and connector models are not quantized. 
with weights.use_loader(DefaultWeightsLoader(UnquantizedWeight)): self.vision_model = Idefics2VisionTransformer( prefix=( f"{prefix}.model.vision_model" if prefix else "model.vision_model" ), config=vision_config, weights=weights, ) config.quantize = None self.connector = Idefics2Connector( prefix=f"{prefix}.model.connector" if prefix else "model.connector", config=config, weights=weights, ) self.config = config self.image_seq_len = config.perceiver_config.resampler_n_latents self.image_token_id = config.image_token_id self.pad_token_id = ( config.pad_token_id if config.pad_token_id is not None else -1 ) def _merge_input_ids_with_image_features( self, input_ids: torch.Tensor, inputs_embeds: torch.Tensor, image_features: torch.Tensor, ): """In place merges in vision_embeddings with inputs_embeds.""" # mask = input_ids == self.config.image_token_index mask = input_ids == self.config.image_token_id # Let's pray we have enabled enough slots ! inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) return inputs_embeds def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, pixel_values: torch.FloatTensor = None, pixel_attention_mask: Optional[torch.BoolTensor] = None, # Unused here image_sizes: Optional[torch.Tensor] = None, adapter_data: Optional[torch.Tensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, ): inputs_embeds = self.text_model.embed_tokens(input_ids) if pixel_values is not None: batch_size, num_images, num_channels, height, width = pixel_values.shape all_states = [] all_pixel_values = pixel_values all_pixel_mask = pixel_attention_mask for i in range(batch_size): pixel_values = all_pixel_values.to( dtype=self.dtype ) # fp16 compatibility pixel_values = pixel_values[i : i + 1] pixel_values = pixel_values.view(num_images, *pixel_values.shape[2:]) # Remove padding images - padding images are full 0. 
nb_values_per_image = pixel_values.shape[1:].numel() real_images_inds = (pixel_values == 0.0).sum( dim=(-1, -2, -3) ) != nb_values_per_image pixel_values = pixel_values[real_images_inds].contiguous() # Handle the vision attention mask if pixel_attention_mask is None: pixel_attention_mask = torch.ones( size=( pixel_values.size(0), pixel_values.size(2), pixel_values.size(3), ), dtype=torch.bool, device=pixel_values.device, ) else: # Remove padding images from the mask/pP p pixel_attention_mask = all_pixel_mask[i : i + 1] pixel_attention_mask = pixel_attention_mask.view( 1 * num_images, *pixel_attention_mask.shape[2:] ) pixel_attention_mask = pixel_attention_mask[ real_images_inds ].contiguous() patch_size = self.config.vision_config.patch_size patches_subgrid = pixel_attention_mask.unfold( dimension=1, size=patch_size, step=patch_size ) patches_subgrid = patches_subgrid.unfold( dimension=2, size=patch_size, step=patch_size ) patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() # Get sequence from the vision encoder image_hidden_states = self.vision_model( pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, ) # Modality projection & resampling image_hidden_states = self.connector( image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1), ) all_states.append(image_hidden_states) image_hidden_states = torch.stack(all_states, dim=0) # When we generate, we don't want to replace the potential image_token_id that we generated by images # that simply don't exist inputs_embeds = self._merge_input_ids_with_image_features( input_ids, inputs_embeds, image_hidden_states ) hidden_states = self.text_model.model( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, true_max_s=max_s, prefill_cache_indices=None, adapter_data=adapter_data, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.text_model.lm_head(hidden_states) return logits, speculative_logits
text-generation-inference/server/text_generation_server/models/custom_modeling/idefics2.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics2.py", "repo_id": "text-generation-inference", "token_count": 15298 }
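The repeat_kv docstring above claims equivalence with torch.repeat_interleave; a standalone check of that claim (toy shapes, nothing model-specific) is:

import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # Same helper as defined in the file above: expand kv heads to attention heads.
    batch, num_kv_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_kv_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_kv_heads * n_rep, slen, head_dim)

kv = torch.randn(2, 4, 5, 8)          # batch=2, kv_heads=4, seq=5, head_dim=8
expanded = repeat_kv(kv, n_rep=3)     # -> (2, 12, 5, 8)
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=3, dim=1))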
from typing import Optional, Tuple import warnings import math import torch from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutputWithPooling, ) from transformers import SiglipConfig, SiglipVisionConfig from torch.nn.init import _calculate_fan_in_and_fan_out from text_generation_server.layers.tensor_parallel import ( TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear, ) class SiglipVisionEmbeddings(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", ) self.patch_embedding.weight = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.weight"), requires_grad=False ) self.patch_embedding.bias = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.bias"), requires_grad=False ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches self.position_embedding = TensorParallelEmbedding( prefix=f"{prefix}.position_embedding", weights=weights ) self.register_buffer( "position_ids", torch.arange(self.num_positions, device=weights.device).expand((1, -1)), persistent=False, ) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: patch_embeds = self.patch_embedding( pixel_values ) # shape = [*, width, grid, grid] embeddings = patch_embeds.flatten(2).transpose(1, 2) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class SiglipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.head_size = self.head_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k_proj", weights=weights, bias=True ) self.v_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v_proj", weights=weights, bias=True ) self.q_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q_proj", weights=weights, bias=True ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=True ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return ( tensor.view(bsz, seq_len, self.num_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) # scale post matmul attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) * self.scale if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # upcast attention to fp32 attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(attn_weights.dtype) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_size): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_size)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_size) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class SiglipMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load( # config.hidden_size, config.intermediate_size prefix=f"{prefix}.fc1", config=config, weights=weights, bias=True ) self.fc2 = TensorParallelRowLinear.load( # config.intermediate_size, config.hidden_size prefix=f"{prefix}.fc2", config=config, weights=weights, bias=True ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = 
self.fc2(hidden_states) return hidden_states class SiglipEncoderLayer(nn.Module): def __init__(self, prefix, config: SiglipConfig, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = SiglipAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.layer_norm1 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps ) self.mlp = SiglipMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.layer_norm2 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps ) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, ) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states, None class SiglipMultiheadAttentionPoolingHead(nn.Module): """Multihead Attention Pooling.""" def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.attention = torch.nn.MultiheadAttention( config.hidden_size, config.num_attention_heads, batch_first=True ) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = SiglipMLP(prefix, config, weights) def forward(self, hidden_state): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention(probe, hidden_state, hidden_state)[0] residual = hidden_state hidden_state = self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0] def _trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn( "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect.", stacklevel=2, ) # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values lower = norm_cdf((a - mean) / std) upper = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * lower - 1, 2 * upper - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) def trunc_normal_tf_( tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0, ) -> torch.Tensor: """Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. 
The method used for generating the random values works best when :math:`a \\leq \text{mean} \\leq b`. NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0 and the result is subsquently scaled and shifted by the mean and std args. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value """ with torch.no_grad(): _trunc_normal_(tensor, 0, 1.0, a, b) tensor.mul_(std).add_(mean) def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"): fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) if mode == "fan_in": denom = fan_in elif mode == "fan_out": denom = fan_out elif mode == "fan_avg": denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == "truncated_normal": # constant is stddev of standard normal truncated to (-2, 2) trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978) elif distribution == "normal": with torch.no_grad(): tensor.normal_(std=math.sqrt(variance)) elif distribution == "uniform": bound = math.sqrt(3 * variance) with torch.no_grad(): tensor.uniform_(-bound, bound) else: raise ValueError(f"invalid distribution {distribution}") def lecun_normal_(tensor): variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal") def default_flax_embed_init(tensor): variance_scaling_(tensor, mode="fan_in", distribution="normal") class SiglipEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`SiglipEncoderLayer`]. Args: config: SiglipConfig """ def __init__(self, prefix, config: SiglipConfig, weights): super().__init__() self.config = config self.layers = nn.ModuleList( [ SiglipEncoderLayer( prefix=f"{prefix}.layers.{i}", config=config, weights=weights ) for i in range(config.num_hidden_layers) ] ) def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, ): hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): hidden_states, _ = encoder_layer( hidden_states, attention_mask, ) return hidden_states class SiglipVisionTransformer(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.config = config self.embeddings = SiglipVisionEmbeddings( prefix=f"{prefix}.embeddings", config=config, weights=weights ) self.encoder = SiglipEncoder( prefix=f"{prefix}.encoder", config=config, weights=weights ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, ): if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) # NOTE: up until this point, the code logits are exactly # the same as the transformers code. The values evaulate # slightly differently in our encoder layer. encoder_outputs = self.encoder( inputs_embeds=hidden_states, ) last_hidden_state = encoder_outputs return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, # pooler_output=pooled_output, # hidden_states=encoder_outputs, )
text-generation-inference/server/text_generation_server/models/custom_modeling/siglip.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/siglip.py", "repo_id": "text-generation-inference", "token_count": 6676 }
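The variance-scaling initializers defined in the siglip.py record above (`trunc_normal_tf_`, `variance_scaling_`, `lecun_normal_`, `default_flax_embed_init`) are generic tensor initializers. The sketch below is not part of that file; it is a minimal illustration of how they might be applied, assuming the `text_generation_server` package is importable at the path recorded in `file_path` (the import path may differ between releases) and using arbitrary example layer shapes.

import torch
from torch import nn

# Hypothetical usage sketch; the module path mirrors the `file_path` above.
from text_generation_server.models.custom_modeling.siglip import (
    default_flax_embed_init,
    lecun_normal_,
    trunc_normal_tf_,
)

linear = nn.Linear(768, 3072)           # arbitrary example shapes
embed = nn.Embedding(32000, 768)

lecun_normal_(linear.weight)            # fan_in scaling, truncated normal
default_flax_embed_init(embed.weight)   # fan_in scaling, plain normal
trunc_normal_tf_(linear.bias, std=0.02)

# Because the [-2, 2] bounds are applied while sampling with mean=0, std=1
# and the result is only rescaled afterwards, every value lands within
# 2 * std of the mean.
assert linear.bias.abs().max().item() <= 2 * 0.02 + 1e-6
print(linear.weight.std().item(), embed.weight.std().item())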
import json import os from dataclasses import dataclass from typing import Optional, List from huggingface_hub import hf_hub_download from text_generation_server.layers.marlin.gptq import can_use_gptq_marlin from text_generation_server.utils.weights import ( DefaultWeightsLoader, WeightsLoader, ) # TODO: Split this config to have a single config type per quant method @dataclass class _QuantizerConfig: bits: int checkpoint_format: Optional[str] desc_act: bool groupsize: int quant_method: str sym: bool weight_block_size: Optional[List[int]] @dataclass class _FP8QuantizerConfig: activation_scale_ub: float def _get_config_json(model_id: str, revision: Optional[str], filename: str): if os.path.exists( os.path.join( model_id, ) ): filename = os.path.join(model_id, filename) else: filename = hf_hub_download(model_id, filename=filename, revision=revision) with open(filename, "r") as f: return json.load(f) # We should probably do this with Pydantic JSON deserialization, # but for now we'll stay close to the old _set_gptq_params. def _get_quantizer_config(model_id, revision): bits = 4 groupsize = -1 quant_method = "gptq" checkpoint_format = None sym = False desc_act = False weight_block_size = None filename = "config.json" try: data = _get_config_json(model_id, revision, filename) # FP8 config if data["quantization_config"]["quant_method"] == "fbgemm_fp8": return _FP8QuantizerConfig( activation_scale_ub=data["quantization_config"]["activation_scale_ub"] ) weight_block_size = data["quantization_config"].get("weight_block_size", None) if "zero_point" in data["quantization_config"]: sym = not data["quantization_config"]["zero_point"] quant_method = "awq" elif "sym" in data["quantization_config"]: sym = data["quantization_config"]["sym"] bits = data["quantization_config"]["bits"] groupsize = data["quantization_config"]["group_size"] # Order is important here, desc_act is missing on some real models quant_method = data["quantization_config"]["quant_method"] checkpoint_format = data["quantization_config"].get("checkpoint_format") desc_act = data["quantization_config"]["desc_act"] except Exception: filename = "quantize_config.json" try: data = _get_config_json(model_id, revision, filename) bits = data["bits"] groupsize = data["group_size"] if "zero_point" in data: sym = not data["zero_point"] quant_method = "awq" elif "sym" in data: sym = data["sym"] desc_act = data["desc_act"] if "version" in data and data["version"] == "GEMM": quant_method = "awq" except Exception: filename = "quant_config.json" try: data = _get_config_json(model_id, revision, filename) bits = data["w_bit"] groupsize = data["q_group_size"] desc_act = data["desc_act"] if "version" in data and data["version"] == "GEMM": quant_method = "awq" except Exception: pass return _QuantizerConfig( bits=bits, groupsize=groupsize, quant_method=quant_method, checkpoint_format=checkpoint_format, sym=sym, desc_act=desc_act, weight_block_size=weight_block_size, ) def get_loader( quantize: Optional[str], model_id: str, revision: Optional[str] ) -> WeightsLoader: if quantize == "compressed-tensors": config = _get_config_json(model_id, revision, "config.json") from text_generation_server.layers.compressed_tensors import ( CompressedTensorsLoader, ) return CompressedTensorsLoader(config) quantizer_config = _get_quantizer_config(model_id, revision) if quantize in {"awq", "gptq"}: from text_generation_server.layers.gptq import GPTQWeightsLoader # TODO: improve check once we have one config type per quantize value if not isinstance(quantizer_config, 
_QuantizerConfig):
            raise ValueError(
                f"Quantize is set to `{quantize}` but received a `{quantizer_config.__class__.__name__}` config."
            )

        if can_use_gptq_marlin(
            bits=quantizer_config.bits,
            groupsize=quantizer_config.groupsize,
            quant_method=quantizer_config.quant_method,
            quantize=quantize,
            sym=quantizer_config.sym,
        ):
            from text_generation_server.layers.marlin import GPTQMarlinWeightsLoader

            return GPTQMarlinWeightsLoader(
                bits=quantizer_config.bits,
                desc_act=quantizer_config.desc_act,
                groupsize=quantizer_config.groupsize,
                quant_method=quantizer_config.quant_method,
                quantize=quantize,
                sym=quantizer_config.sym,
            )
        else:
            return GPTQWeightsLoader(
                bits=quantizer_config.bits,
                desc_act=quantizer_config.desc_act,
                groupsize=quantizer_config.groupsize,
                quant_method=quantizer_config.quant_method,
                quantize=quantize,
                sym=quantizer_config.sym,
            )
    elif quantize == "bitsandbytes":
        from text_generation_server.layers.bnb import BNBWeight

        return DefaultWeightsLoader(BNBWeight)
    elif quantize == "bitsandbytes-fp4":
        from text_generation_server.layers.bnb import BNBFP4Weight

        return DefaultWeightsLoader(BNBFP4Weight)
    elif quantize == "bitsandbytes-nf4":
        from text_generation_server.layers.bnb import BNBNF4Weight

        return DefaultWeightsLoader(BNBNF4Weight)
    elif quantize == "eetq":
        from text_generation_server.layers.eetq import EETQWeight

        return DefaultWeightsLoader(EETQWeight)
    elif quantize == "exl2":
        from text_generation_server.layers.exl2 import Exl2WeightsLoader

        return Exl2WeightsLoader()
    elif quantize == "marlin":
        from text_generation_server.layers.marlin import MarlinWeightsLoader

        # TODO: improve check once we have one config type per quantize value
        if not isinstance(quantizer_config, _QuantizerConfig):
            raise ValueError(
                f"Quantize is set to `{quantize}` but received a `{quantizer_config.__class__.__name__}` config."
            )
        return MarlinWeightsLoader(
            bits=quantizer_config.bits,
            is_marlin_24=quantizer_config.checkpoint_format == "marlin_24",
        )
    elif quantize == "fp8" or quantize is None:
        from text_generation_server.layers.fp8 import HybridFP8UnquantLoader

        # _FP8QuantizerConfig only defines `activation_scale_ub` (and no
        # `weight_block_size`), so branch on the config type to avoid an
        # AttributeError when an fbgemm_fp8 checkpoint was detected.
        activation_scale_ub = None
        weight_block_size = None
        if isinstance(quantizer_config, _FP8QuantizerConfig):
            activation_scale_ub = quantizer_config.activation_scale_ub
        else:
            weight_block_size = quantizer_config.weight_block_size

        return HybridFP8UnquantLoader(
            activation_scale_ub,
            to_fp8=quantize == "fp8",
            weight_block_size=weight_block_size,
        )
    else:
        raise ValueError(f"Unknown quantization method: {quantize}")
text-generation-inference/server/text_generation_server/utils/quantization.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/quantization.py", "repo_id": "text-generation-inference", "token_count": 3375 }
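`get_loader` in the quantization.py record above ties the detected quantizer config to a concrete `WeightsLoader`. The sketch below is not part of that file; it is a hedged illustration of how the selection might be exercised, assuming the `text_generation_server` package and its quantization kernels are installed, network access to the Hugging Face Hub is available, and using an example GPTQ model id (any repo with a `quantization_config` in its config.json would do).

# Hypothetical usage sketch of the loader selection shown above.
from text_generation_server.utils.quantization import (
    _get_quantizer_config,
    get_loader,
)

model_id = "TheBloke/Llama-2-7B-GPTQ"  # example GPTQ checkpoint id
revision = None

# Step 1: see what was parsed out of the checkpoint's config files.
cfg = _get_quantizer_config(model_id, revision)
print(cfg.quant_method, cfg.bits, cfg.groupsize, cfg.sym, cfg.desc_act)

# Step 2: resolve the matching loader.  For GPTQ this returns either
# GPTQMarlinWeightsLoader or GPTQWeightsLoader depending on
# can_use_gptq_marlin(); with quantize=None the hybrid FP8/unquantized
# loader is returned instead.
loader = get_loader(quantize="gptq", model_id=model_id, revision=revision)
print(type(loader).__name__)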
parser: '@typescript-eslint/parser' parserOptions: ecmaFeatures: jsx: true ecmaVersion: latest sourceType: module project: ./tsconfig.json env: browser: true es6: true node: true jest: true ignorePatterns: ['index.js', 'target/'] plugins: - import - '@typescript-eslint' extends: - eslint:recommended - plugin:prettier/recommended rules: # 0 = off, 1 = warn, 2 = error 'space-before-function-paren': 0 'no-useless-constructor': 0 'no-undef': 2 'no-console': [2, { allow: ['error', 'warn', 'info', 'assert'] }] 'comma-dangle': ['error', 'only-multiline'] 'no-unused-vars': 0 'no-var': 2 'one-var-declaration-per-line': 2 'prefer-const': 2 'no-const-assign': 2 'no-duplicate-imports': 2 'no-use-before-define': [2, { 'functions': false, 'classes': false }] 'eqeqeq': [2, 'always', { 'null': 'ignore' }] 'no-case-declarations': 0 'no-restricted-syntax': [ 2, { 'selector': 'BinaryExpression[operator=/(==|===|!=|!==)/][left.raw=true], BinaryExpression[operator=/(==|===|!=|!==)/][right.raw=true]', 'message': Don't compare for equality against boolean literals, }, ] # https://github.com/benmosher/eslint-plugin-import/pull/334 'import/no-duplicates': 2 'import/first': 2 'import/newline-after-import': 2 'import/order': [ 2, { 'newlines-between': 'always', 'alphabetize': { 'order': 'asc' }, 'groups': ['builtin', 'external', 'internal', 'parent', 'sibling', 'index'], }, ] overrides: - files: - ./**/*{.ts,.tsx} rules: 'no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }] 'no-undef': 0 # TypeScript declare merge 'no-redeclare': 0 'no-useless-constructor': 0 'no-dupe-class-members': 0 'no-case-declarations': 0 'no-duplicate-imports': 0 # TypeScript Interface and Type 'no-use-before-define': 0 '@typescript-eslint/adjacent-overload-signatures': 2 '@typescript-eslint/await-thenable': 2 '@typescript-eslint/consistent-type-assertions': 2 '@typescript-eslint/ban-types': [ 'error', { 'types': { 'String': { 'message': 'Use string instead', 'fixWith': 'string' }, 'Number': { 'message': 'Use number instead', 'fixWith': 'number' }, 'Boolean': { 'message': 'Use boolean instead', 'fixWith': 'boolean' }, 'Function': { 'message': 'Use explicit type instead' }, }, }, ] '@typescript-eslint/explicit-member-accessibility': [ 'error', { accessibility: 'explicit', overrides: { accessors: 'no-public', constructors: 'no-public', methods: 'no-public', properties: 'no-public', parameterProperties: 'explicit', }, }, ] '@typescript-eslint/method-signature-style': 2 '@typescript-eslint/no-floating-promises': 2 '@typescript-eslint/no-implied-eval': 2 '@typescript-eslint/no-for-in-array': 2 '@typescript-eslint/no-inferrable-types': 2 '@typescript-eslint/no-invalid-void-type': 2 '@typescript-eslint/no-misused-new': 2 '@typescript-eslint/no-misused-promises': 2 '@typescript-eslint/no-namespace': 2 '@typescript-eslint/no-non-null-asserted-optional-chain': 2 '@typescript-eslint/no-throw-literal': 2 '@typescript-eslint/no-unnecessary-boolean-literal-compare': 2 '@typescript-eslint/prefer-for-of': 2 '@typescript-eslint/prefer-nullish-coalescing': 2 '@typescript-eslint/switch-exhaustiveness-check': 2 '@typescript-eslint/prefer-optional-chain': 2 '@typescript-eslint/prefer-readonly': 2 '@typescript-eslint/prefer-string-starts-ends-with': 0 '@typescript-eslint/no-array-constructor': 2 '@typescript-eslint/require-await': 2 '@typescript-eslint/return-await': 2 '@typescript-eslint/ban-ts-comment': [2, { 'ts-expect-error': false, 'ts-ignore': true, 'ts-nocheck': true, 'ts-check': false }] 
'@typescript-eslint/naming-convention': [ 2, { selector: 'memberLike', format: ['camelCase', 'PascalCase'], modifiers: ['private'], leadingUnderscore: 'forbid', }, ] '@typescript-eslint/no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }] '@typescript-eslint/member-ordering': [ 2, { default: [ 'public-static-field', 'protected-static-field', 'private-static-field', 'public-static-method', 'protected-static-method', 'private-static-method', 'public-instance-field', 'protected-instance-field', 'private-instance-field', 'public-constructor', 'protected-constructor', 'private-constructor', 'public-instance-method', 'protected-instance-method', 'private-instance-method', ], }, ]
tokenizers/bindings/node/.eslintrc.yml/0
{ "file_path": "tokenizers/bindings/node/.eslintrc.yml", "repo_id": "tokenizers", "token_count": 2733 }
/* eslint-disable prettier/prettier */ // For a detailed explanation regarding each configuration property, visit: // https://jestjs.io/docs/en/configuration.html module.exports = { // All imported modules in your tests should be mocked automatically // automock: false, // Stop running tests after `n` failures // bail: 0, // Respect "browser" field in package.json when resolving modules // browser: false, // The directory where Jest should store its cached dependency information // cacheDirectory: "/private/var/folders/y_/n6h0fkqn3m57bg_ktk25j7rm0000gn/T/jest_dx", // Automatically clear mock calls and instances between every test // clearMocks: false, // Indicates whether the coverage information should be collected while executing the test // collectCoverage: false, // An array of glob patterns indicating a set of files for which coverage information should be collected // collectCoverageFrom: null, // The directory where Jest should output its coverage files // coverageDirectory: null, // An array of regexp pattern strings used to skip coverage collection // coveragePathIgnorePatterns: [ // "/node_modules/" // ], // A list of reporter names that Jest uses when writing coverage reports // coverageReporters: [ // "json", // "text", // "lcov", // "clover" // ], // An object that configures minimum threshold enforcement for coverage results // coverageThreshold: null, // A path to a custom dependency extractor // dependencyExtractor: null, // Make calling deprecated APIs throw helpful error messages // errorOnDeprecated: false, // Force coverage collection from ignored files using an array of glob patterns // forceCoverageMatch: [], // A path to a module which exports an async function that is triggered once before all test suites // globalSetup: null, // A path to a module which exports an async function that is triggered once after all test suites // globalTeardown: null, // A set of global variables that need to be available in all test environments // globals: {}, // The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers. // maxWorkers: "50%", // An array of directory names to be searched recursively up from the requiring module's location // moduleDirectories: [ // "node_modules" // ], // An array of file extensions your modules use // moduleFileExtensions: [ // "js", // "json", // "jsx", // "ts", // "tsx", // "node" // ], // A map from regular expressions to module names that allow to stub out resources with a single module // moduleNameMapper: {}, // An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader // modulePathIgnorePatterns: [], // Activates notifications for test results // notify: false, // An enum that specifies notification mode. 
Requires { notify: true } // notifyMode: "failure-change", // A preset that is used as a base for Jest's configuration preset: 'ts-jest', // Run tests from one or more projects // projects: null, // Use this configuration option to add custom reporters to Jest // reporters: undefined, // Automatically reset mock state between every test // resetMocks: false, // Reset the module registry before running each individual test // resetModules: false, // A path to a custom resolver // resolver: null, // Automatically restore mock state between every test // restoreMocks: false, // The root directory that Jest should scan for tests and modules within // rootDir: null, // A list of paths to directories that Jest should use to search for files in // roots: [ // "<rootDir>" // ], // Allows you to use a custom runner instead of Jest's default test runner // runner: "jest-runner", // The paths to modules that run some code to configure or set up the testing environment before each test // setupFiles: [], // A list of paths to modules that run some code to configure or set up the testing framework before each test // setupFilesAfterEnv: [], // A list of paths to snapshot serializer modules Jest should use for snapshot testing // snapshotSerializers: [], // The test environment that will be used for testing testEnvironment: 'node', // Options that will be passed to the testEnvironment // testEnvironmentOptions: {}, // Adds a location field to test results // testLocationInResults: false, // The glob patterns Jest uses to detect test files // testMatch: [ // "**/__tests__/**/*.[jt]s?(x)", // "**/?(*.)+(spec|test).[tj]s?(x)" // ], // An array of regexp pattern strings that are matched against all test paths, matched tests are skipped testPathIgnorePatterns: ['/node_modules/', '/dist/'], // The regexp pattern or array of patterns that Jest uses to detect test files // testRegex: [], // This option allows the use of a custom results processor // testResultsProcessor: null, // This option allows use of a custom test runner // testRunner: "jasmine2", // This option sets the URL for the jsdom environment. It is reflected in properties such as location.href // testURL: "http://localhost", // Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout" // timers: "real", // A map from regular expressions to paths to transformers // transform: null, // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation // transformIgnorePatterns: [ // "/node_modules/" // ], // An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them // unmockedModulePathPatterns: undefined, // Indicates whether each individual test should be reported during the run // verbose: null, // An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode watchPathIgnorePatterns: ['<rootDir>/node_modules/', '<rootDir>/native/', '<rootDir>/dist/', '<rootDir>/build/'], // Whether to use watchman for file crawling // watchman: true, }
tokenizers/bindings/node/jest.config.js/0
{ "file_path": "tokenizers/bindings/node/jest.config.js", "repo_id": "tokenizers", "token_count": 1715 }