Dataset schema: `text` (string, lengths 7 to 318k), `id` (string, lengths 14 to 166), `metadata` (dict), `__index_level_0__` (int64, values 0 to 439).
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unets.unet_2d_blocks_flax import ( FlaxCrossAttnDownBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn, ) @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): """ The output of [`FlaxControlNetModel`]. Args: down_block_res_samples (`jnp.ndarray`): mid_block_res_sample (`jnp.ndarray`): """ down_block_res_samples: jnp.ndarray mid_block_res_sample: jnp.ndarray class FlaxControlNetConditioningEmbedding(nn.Module): conditioning_embedding_channels: int block_out_channels: Tuple[int, ...] = (16, 32, 96, 256) dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks = [] for i in range(len(self.block_out_channels) - 1): channel_in = self.block_out_channels[i] channel_out = self.block_out_channels[i + 1] conv1 = nn.Conv( channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv1) conv2 = nn.Conv( channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv2) self.blocks = blocks self.conv_out = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: embedding = self.conv_in(conditioning) embedding = nn.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = nn.silu(embedding) embedding = self.conv_out(embedding) return embedding @flax_register_to_config class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin): r""" A ControlNet model. This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it’s generic methods implemented for all models (such as downloading or saving). This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its general usage and behavior. 
Inherent JAX features such as the following are supported: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): The tuple of downsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`): The channel order of conditional image. Will convert to `rgb` if it's `bgr`. conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`): The tuple of output channel for each block in the `conditioning_embedding` layer. """ sample_size: int = 32 in_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) only_cross_attention: Union[bool, Tuple[bool, ...]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 controlnet_conditioning_channel_order: str = "rgb" conditioning_embedding_out_channels: Tuple[int, ...] 
= (16, 32, 96, 256) def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8) controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) only_cross_attention = self.only_cross_attention if isinstance(only_cross_attention, bool): only_cross_attention = (only_cross_attention,) * len(self.down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(self.down_block_types) # down down_blocks = [] controlnet_down_blocks = [] output_channel = block_out_channels[0] controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "CrossAttnDownBlock2D": down_block = FlaxCrossAttnDownBlock2D( in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: down_block = FlaxDownBlock2D( in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(down_block) for _ in range(self.layers_per_block): controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", 
kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) self.down_blocks = down_blocks self.controlnet_down_blocks = controlnet_down_blocks # mid mid_block_channel = block_out_channels[-1] self.mid_block = FlaxUNetMidBlock2DCrossAttn( in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) self.controlnet_mid_block = nn.Conv( mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self, sample: jnp.ndarray, timesteps: Union[jnp.ndarray, float, int], encoder_hidden_states: jnp.ndarray, controlnet_cond: jnp.ndarray, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False, ) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]: r""" Args: sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor timestep (`jnp.ndarray` or `float` or `int`): timesteps encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a plain tuple. train (`bool`, *optional*, defaults to `False`): Use deterministic functions and disable dropout when not training. Returns: [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`: [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ channel_order = self.controlnet_conditioning_channel_order if channel_order == "bgr": controlnet_cond = jnp.flip(controlnet_cond, axis=1) # 1. time if not isinstance(timesteps, jnp.ndarray): timesteps = jnp.array([timesteps], dtype=jnp.int32) elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: timesteps = timesteps.astype(dtype=jnp.float32) timesteps = jnp.expand_dims(timesteps, 0) t_emb = self.time_proj(timesteps) t_emb = self.time_embedding(t_emb) # 2. pre-process sample = jnp.transpose(sample, (0, 2, 3, 1)) sample = self.conv_in(sample) controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1)) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample += controlnet_cond # 3. down down_block_res_samples = (sample,) for down_block in self.down_blocks: if isinstance(down_block, FlaxCrossAttnDownBlock2D): sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) else: sample, res_samples = down_block(sample, t_emb, deterministic=not train) down_block_res_samples += res_samples # 4. mid sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) # 5. 
controlnet blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. scaling down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
diffusers/src/diffusers/models/controlnet_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnet_flax.py", "repo_id": "diffusers", "token_count": 7636 }
111
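For orientation, here is a minimal sketch (not part of the source file above) of how the `FlaxControlNetModel` defined in that record is typically initialized and applied. The dummy shapes mirror the ones built inside `init_weights`, the `apply` call follows the same pattern the Flax ControlNet pipeline later in this dataset uses, and the `from diffusers import ...` path plus the sequence length of 77 are assumptions for illustration.

```py
import jax
import jax.numpy as jnp

from diffusers import FlaxControlNetModel

# Defaults from the dataclass fields above: sample_size=32, in_channels=4, cross_attention_dim=1280.
controlnet = FlaxControlNetModel()

# init_weights builds dummy inputs internally and returns the parameter FrozenDict.
params = controlnet.init_weights(jax.random.PRNGKey(0))

# NCHW inputs matching the shapes used in init_weights; 77 is just a typical text sequence length.
sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
controlnet_cond = jnp.zeros((1, 3, 256, 256), dtype=jnp.float32)

out = controlnet.apply(
    {"params": params},
    sample,
    timesteps,
    encoder_hidden_states,
    controlnet_cond,
    conditioning_scale=1.0,
)
# out.down_block_res_samples is a tuple of per-resolution residuals and
# out.mid_block_res_sample is a single array; both are fed to the UNet as additional residuals.
```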
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block @dataclass class UNet2DOutput(BaseOutput): """ The output of [`UNet2DModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output from the last layer of the model. """ sample: torch.FloatTensor class UNet2DModel(ModelMixin, ConfigMixin): r""" A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) - 1)`. in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 3): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use. freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip sin to cos for Fourier time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`): Tuple of downsample block types. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`): Block type for middle of UNet, it can be either `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`. up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`): Tuple of upsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`): Tuple of block output channels. layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block. mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block. downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution. downsample_type (`str`, *optional*, defaults to `conv`): The downsample type for downsampling layers. Choose between "conv" and "resnet" upsample_type (`str`, *optional*, defaults to `conv`): The upsample type for upsampling layers. 
Choose between "conv" and "resnet" dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization. attn_norm_num_groups (`int`, *optional*, defaults to `None`): If set to an integer, a group norm layer will be created in the mid block's [`Attention`] layer with the given number of groups. If left as `None`, the group norm layer will only be created if `resnet_time_scale_shift` is set to `default`, and if created will have `norm_num_groups` groups. norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization. resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, or `"identity"`. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class conditioning with `class_embed_type` equal to `None`. """ @register_to_config def __init__( self, sample_size: Optional[Union[int, Tuple[int, int]]] = None, in_channels: int = 3, out_channels: int = 3, center_input_sample: bool = False, time_embedding_type: str = "positional", freq_shift: int = 0, flip_sin_to_cos: bool = True, down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"), up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"), block_out_channels: Tuple[int] = (224, 448, 672, 896), layers_per_block: int = 2, mid_block_scale_factor: float = 1, downsample_padding: int = 1, downsample_type: str = "conv", upsample_type: str = "conv", dropout: float = 0.0, act_fn: str = "silu", attention_head_dim: Optional[int] = 8, norm_num_groups: int = 32, attn_norm_num_groups: Optional[int] = None, norm_eps: float = 1e-5, resnet_time_scale_shift: str = "default", add_attention: bool = True, class_embed_type: Optional[str] = None, num_class_embeds: Optional[int] = None, num_train_timesteps: Optional[int] = None, ): super().__init__() self.sample_size = sample_size time_embed_dim = block_out_channels[0] * 4 # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) # time if time_embedding_type == "fourier": self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == "positional": self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] elif time_embedding_type == "learned": self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0]) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) else: self.class_embedding = None self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type, dropout=dropout, ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlock2D( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1], resnet_groups=norm_num_groups, attn_groups=attn_norm_num_groups, add_attention=add_attention, ) # up reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block( up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type, dropout=dropout, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, 
eps=norm_eps) self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], class_labels: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DOutput, Tuple]: r""" The [`UNet2DModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. class_labels (`torch.FloatTensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. Returns: [`~models.unet_2d.UNet2DOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=self.dtype) emb = self.time_embedding(t_emb) if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when doing class conditioning") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb elif self.class_embedding is None and class_labels is not None: raise ValueError("class_embedding needs to be initialized in order to use class conditioning") # 2. pre-process skip_sample = sample sample = self.conv_in(sample) # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "skip_conv"): sample, res_samples, skip_sample = downsample_block( hidden_states=sample, temb=emb, skip_sample=skip_sample ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples # 4. mid sample = self.mid_block(sample, emb) # 5. up skip_sample = None for upsample_block in self.up_blocks: res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] if hasattr(upsample_block, "skip_conv"): sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample) else: sample = upsample_block(sample, res_samples, emb) # 6. 
post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if skip_sample is not None: sample += skip_sample if self.config.time_embedding_type == "fourier": timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:])))) sample = sample / timesteps if not return_dict: return (sample,) return UNet2DOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_2d.py", "repo_id": "diffusers", "token_count": 7251 }
112
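As a quick, hedged illustration of the `UNet2DModel` API documented above (the configuration values here are arbitrary small numbers chosen for the example, not taken from the file), a single forward pass looks like this:

```py
import torch

from diffusers import UNet2DModel

# Small configuration for illustration; block_out_channels must match the four default block types.
model = UNet2DModel(
    sample_size=64,
    in_channels=3,
    out_channels=3,
    block_out_channels=(32, 64, 128, 128),
    layers_per_block=1,
)

noisy_sample = torch.randn(1, 3, 64, 64)
timestep = torch.tensor([10])

with torch.no_grad():
    out = model(noisy_sample, timestep)

# The output keeps the input's spatial shape; with return_dict=False a plain tuple is returned instead.
assert out.sample.shape == noisy_sample.shape
```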
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .blip_image_processing import BlipImageProcessor
    from .modeling_blip2 import Blip2QFormerModel
    from .modeling_ctx_clip import ContextCLIPTextModel
    from .pipeline_blip_diffusion import BlipDiffusionPipeline
diffusers/src/diffusers/pipelines/blip_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/blip_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 219 }
113
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from PIL import Image from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel from ...schedulers import ( FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, ) from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from ..stable_diffusion import FlaxStableDiffusionPipelineOutput from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Set to True to use python for loop instead of jax.fori_loop for easier debugging DEBUG = False EXAMPLE_DOC_STRING = """ Examples: ```py >>> import jax >>> import numpy as np >>> import jax.numpy as jnp >>> from flax.jax_utils import replicate >>> from flax.training.common_utils import shard >>> from diffusers.utils import load_image, make_image_grid >>> from PIL import Image >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel >>> def create_key(seed=0): ... return jax.random.PRNGKey(seed) >>> rng = create_key(0) >>> # get canny image >>> canny_image = load_image( ... "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg" ... ) >>> prompts = "best quality, extremely detailed" >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality" >>> # load control net and stable diffusion v1-5 >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( ... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32 ... ) >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32 ... ) >>> params["controlnet"] = controlnet_params >>> num_samples = jax.device_count() >>> rng = jax.random.split(rng, jax.device_count()) >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples) >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) >>> p_params = replicate(params) >>> prompt_ids = shard(prompt_ids) >>> negative_prompt_ids = shard(negative_prompt_ids) >>> processed_image = shard(processed_image) >>> output = pipe( ... prompt_ids=prompt_ids, ... image=processed_image, ... params=p_params, ... prng_seed=rng, ... num_inference_steps=50, ... neg_prompt_ids=negative_prompt_ids, ... jit=True, ... 
).images >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) >>> output_images = make_image_grid(output_images, num_samples // 4, 4) >>> output_images.save("generated_image.png") ``` """ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline): r""" Flax-based pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance. This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`FlaxAutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.FlaxCLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`FlaxUNet2DConditionModel`]): A `FlaxUNet2DConditionModel` to denoise the encoded image latents. controlnet ([`FlaxControlNetModel`]: Provides additional conditioning to the `unet` during the denoising process. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or [`FlaxDPMSolverMultistepScheduler`]. safety_checker ([`FlaxStableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, controlnet: FlaxControlNetModel, scheduler: Union[ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler ], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPFeatureExtractor, dtype: jnp.dtype = jnp.float32, ): super().__init__() self.dtype = dtype if safety_checker is None: logger.warn( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_text_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") text_input = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) return text_input.input_ids def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]): if not isinstance(image, (Image.Image, list)): raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") if isinstance(image, Image.Image): image = [image] processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) return processed_images def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): # safety_model_params should already be replicated when jit is True pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors="np").pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) images_was_copied = False for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image if any(has_nsfw_concepts): warnings.warn( "Potential NSFW content was detected in one or more images. A black image will be returned" " instead. Try again with a different prompt and/or seed." 
) return images, has_nsfw_concepts def _generate( self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, guidance_scale: float, latents: Optional[jnp.ndarray] = None, neg_prompt_ids: Optional[jnp.ndarray] = None, controlnet_conditioning_scale: float = 1.0, ): height, width = image.shape[-2:] if height % 64 != 0 or width % 64 != 0: raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.") # get prompt text embeddings prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" ).input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) image = jnp.concatenate([image] * 2) latents_shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") def loop_body(step, args): latents, scheduler_state = args # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) down_block_res_samples, mid_block_res_sample = self.controlnet.apply( {"params": params["controlnet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, controlnet_cond=image, conditioning_scale=controlnet_conditioning_scale, return_dict=False, ) # predict the noise residual noise_pred = self.unet.apply( {"params": params["unet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return latents, scheduler_state scheduler_state = self.scheduler.set_timesteps( params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape ) # scale the initial noise by the standard deviation required by the scheduler latents = latents * params["scheduler"].init_noise_sigma if DEBUG: # run with python for loop for i in range(num_inference_steps): latents, scheduler_state = loop_body(i, (latents, scheduler_state)) else: latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, 
scheduler_state)) # scale and decode the image latents with vae latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int = 50, guidance_scale: Union[float, jnp.ndarray] = 7.5, latents: jnp.ndarray = None, neg_prompt_ids: jnp.ndarray = None, controlnet_conditioning_scale: Union[float, jnp.ndarray] = 1.0, return_dict: bool = True, jit: bool = False, ): r""" The call function to the pipeline for generation. Args: prompt_ids (`jnp.ndarray`): The prompt or prompts to guide the image generation. image (`jnp.ndarray`): Array representing the ControlNet input condition to provide guidance to the `unet` for generation. params (`Dict` or `FrozenDict`): Dictionary containing the model parameters/weights. prng_seed (`jax.Array`): Array containing random number generator key. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. latents (`jnp.ndarray`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents array is generated by sampling using the supplied random `generator`. controlnet_conditioning_scale (`float` or `jnp.ndarray`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of a plain tuple. jit (`bool`, defaults to `False`): Whether to run `pmap` versions of the generation and safety scoring functions. <Tip warning={true}> This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a future release. </Tip> Examples: Returns: [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ height, width = image.shape[-2:] if isinstance(guidance_scale, float): # Convert to a tensor so each device gets a copy. Follow the prompt_ids for # shape information, as they may be sharded (when `jit` is `True`), or not. guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: # Assume sharded guidance_scale = guidance_scale[:, None] if isinstance(controlnet_conditioning_scale, float): # Convert to a tensor so each device gets a copy. Follow the prompt_ids for # shape information, as they may be sharded (when `jit` is `True`), or not. 
controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: # Assume sharded controlnet_conditioning_scale = controlnet_conditioning_scale[:, None] if jit: images = _p_generate( self, prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ) else: images = self._generate( prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ) if self.safety_checker is not None: safety_params = params["safety_checker"] images_uint8_casted = (images * 255).round().astype("uint8") num_devices, batch_size = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.array(images) # block images if any(has_nsfw_concept): for i, is_nsfw in enumerate(has_nsfw_concept): if is_nsfw: images[i] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) # Static argnums are pipe, num_inference_steps. A change would trigger recompilation. # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). @partial( jax.pmap, in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0), static_broadcasted_argnums=(0, 5), ) def _p_generate( pipe, prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ): return pipe._generate( prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale, ) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): # einops.rearrange(x, 'd b ... -> (d b) ...') num_devices, batch_size = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) def preprocess(image, dtype): image = image.convert("RGB") w, h = image.size w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = jnp.array(image).astype(dtype) / 255.0 image = image[None].transpose(0, 3, 1, 2) return image
diffusers/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py", "repo_id": "diffusers", "token_count": 9958 }
114
fast27_timesteps = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] smart27_timesteps = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] smart50_timesteps = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] smart100_timesteps = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] smart185_timesteps = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] super27_timesteps = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] super40_timesteps = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] super100_timesteps = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
diffusers/src/diffusers/pipelines/deepfloyd_if/timesteps.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deepfloyd_if/timesteps.py", "repo_id": "diffusers", "token_count": 3772 }
115
from typing import TYPE_CHECKING

from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule


_import_structure = {"pipeline_repaint": ["RePaintPipeline"]}

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    from .pipeline_repaint import RePaintPipeline
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )
diffusers/src/diffusers/pipelines/deprecated/repaint/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/repaint/__init__.py", "repo_id": "diffusers", "token_count": 183 }
116
from typing import TYPE_CHECKING

from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule


_import_structure = {"pipeline_stochastic_karras_ve": ["KarrasVePipeline"]}

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    from .pipeline_stochastic_karras_ve import KarrasVePipeline
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )
diffusers/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py", "repo_id": "diffusers", "token_count": 199 }
117
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, XLMRobertaTokenizer, ) from ...models import PriorTransformer, UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler from ...utils import ( replace_example_docstring, ) from ..pipeline_utils import DiffusionPipeline from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline from .text_encoder import MultilingualCLIP TEXT2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import AutoPipelineForText2Image import torch pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipe(prompt=prompt, num_inference_steps=25).images[0] ``` """ IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import AutoPipelineForImage2Image import torch import requests from io import BytesIO from PIL import Image import os pipe = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) image = Image.open(BytesIO(response.content)).convert("RGB") image.thumbnail((768, 768)) image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] ``` """ INPAINT_EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image import torch import numpy as np pipe = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" original_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) # Let's mask out an area above the cat's head mask[:250, 250:-250] = 1 image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] ``` """ class KandinskyCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for text-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonincal unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. """ _load_connected_pipes = True model_cpu_offload_seq = "text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder" def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyPriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyPipeline( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗 Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. 
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
                If not provided, a latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function is called
                with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        prior_outputs = self.prior_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=prior_num_inference_steps,
            generator=generator,
            latents=latents,
            guidance_scale=prior_guidance_scale,
            output_type="pt",
            return_dict=False,
        )
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]

        prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt

        if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
            prompt = (image_embeds.shape[0] // len(prompt)) * prompt

        outputs = self.decoder_pipe(
            prompt=prompt,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
            output_type=output_type,
            callback=callback,
            callback_steps=callback_steps,
            return_dict=return_dict,
        )
        self.maybe_free_model_hooks()

        return outputs


class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
    """
    Combined Pipeline for image-to-image generation using Kandinsky

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        text_encoder ([`MultilingualCLIP`]):
            Frozen text-encoder.
        tokenizer ([`XLMRobertaTokenizer`]):
            Tokenizer of class [`XLMRobertaTokenizer`].
        scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ Decoder to generate the image from the latents.
        prior_prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen image-encoder.
        prior_text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        prior_tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        prior_scheduler ([`UnCLIPScheduler`]):
            A scheduler to be used in combination with `prior` to generate image embedding.
""" _load_connected_pipes = True model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq" def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyPriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyImg2ImgPipeline( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, strength: float = 0.3, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
            `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. Can also accept image latents as `image`; if latents are passed directly, they will not be
                encoded again.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            strength (`float`, *optional*, defaults to 0.3):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            prior_num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
                image at the expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference.
                The function is called with the following arguments: `callback(step: int, timestep: int, latents:
                torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        prior_outputs = self.prior_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=prior_num_inference_steps,
            generator=generator,
            latents=latents,
            guidance_scale=prior_guidance_scale,
            output_type="pt",
            return_dict=False,
        )
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]

        prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
        image = [image] if isinstance(image, PIL.Image.Image) else image

        if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
            prompt = (image_embeds.shape[0] // len(prompt)) * prompt

        if (
            isinstance(image, (list, tuple))
            and len(image) < image_embeds.shape[0]
            and image_embeds.shape[0] % len(image) == 0
        ):
            image = (image_embeds.shape[0] // len(image)) * image

        outputs = self.decoder_pipe(
            prompt=prompt,
            image=image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            strength=strength,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
            output_type=output_type,
            callback=callback,
            callback_steps=callback_steps,
            return_dict=return_dict,
        )
        self.maybe_free_model_hooks()

        return outputs


class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
    """
    Combined Pipeline for inpainting generation using Kandinsky

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        text_encoder ([`MultilingualCLIP`]):
            Frozen text-encoder.
        tokenizer ([`XLMRobertaTokenizer`]):
            Tokenizer of class [`XLMRobertaTokenizer`].
        scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ Decoder to generate the image from the latents.
        prior_prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen image-encoder.
        prior_text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        prior_tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        prior_scheduler ([`UnCLIPScheduler`]):
            A scheduler to be used in combination with `prior` to generate image embedding.
""" _load_connected_pipes = True model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq" def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyPriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyInpaintPipeline( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
            `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. Can also accept image latents as `image`; if latents are passed directly, they will not be
                encoded again.
            mask_image (`np.array`):
                Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
                black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
                channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of
                3, so the expected shape would be `(B, H, W, 1)`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            prior_num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
                image at the expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function is called
                with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        prior_outputs = self.prior_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=prior_num_inference_steps,
            generator=generator,
            latents=latents,
            guidance_scale=prior_guidance_scale,
            output_type="pt",
            return_dict=False,
        )
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]

        prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
        image = [image] if isinstance(image, PIL.Image.Image) else image
        mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image

        if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
            prompt = (image_embeds.shape[0] // len(prompt)) * prompt

        if (
            isinstance(image, (list, tuple))
            and len(image) < image_embeds.shape[0]
            and image_embeds.shape[0] % len(image) == 0
        ):
            image = (image_embeds.shape[0] // len(image)) * image

        if (
            isinstance(mask_image, (list, tuple))
            and len(mask_image) < image_embeds.shape[0]
            and image_embeds.shape[0] % len(mask_image) == 0
        ):
            mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image

        outputs = self.decoder_pipe(
            prompt=prompt,
            image=image,
            mask_image=mask_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
            output_type=output_type,
            callback=callback,
            callback_steps=callback_steps,
            return_dict=return_dict,
        )
        self.maybe_free_model_hooks()

        return outputs
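
# ---------------------------------------------------------------------------
# Sketch: the broadcasting rule the three combined pipelines above apply
# before calling their decoder. When the prior returns more image embeddings
# than there are prompts or images (e.g. `num_images_per_prompt > 1`), the
# shorter list is tiled to match, provided the lengths divide evenly. The
# helper name `broadcast_to_batch` is hypothetical (not part of the diffusers
# API); the assertions mirror the `len(prompt) < image_embeds.shape[0]`
# checks in the `__call__` bodies above.


def broadcast_to_batch(items: list, batch_size: int) -> list:
    """Tile `items` so its length matches `batch_size`, when divisible."""
    if len(items) < batch_size and batch_size % len(items) == 0:
        return (batch_size // len(items)) * items
    return items


# two prompts with num_images_per_prompt=2 -> the prior returns 4 embeddings
assert broadcast_to_batch(["a cat", "a dog"], 4) == ["a cat", "a dog", "a cat", "a dog"]
# non-divisible lengths are left untouched (the decoder pipeline will raise)
assert broadcast_to_batch(["a cat", "a dog", "a fox"], 4) == ["a cat", "a dog", "a fox"]
# ---------------------------------------------------------------------------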
from typing import Callable, Dict, List, Optional, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...loaders import LoraLoaderMixin from ...models import Kandinsky3UNet, VQModel from ...schedulers import DDPMScheduler from ...utils import ( deprecate, is_accelerate_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import AutoPipelineForText2Image >>> import torch >>> pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) >>> pipe.enable_model_cpu_offload() >>> prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." >>> generator = torch.Generator(device="cpu").manual_seed(0) >>> image = pipe(prompt, num_inference_steps=25, generator=generator).images[0] ``` """ def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 new_width = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class Kandinsky3Pipeline(DiffusionPipeline, LoraLoaderMixin): model_cpu_offload_seq = "text_encoder->unet->movq" _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "negative_attention_mask", "attention_mask", ] def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: Kandinsky3UNet, scheduler: DDPMScheduler, movq: VQModel, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq ) def remove_all_hooks(self): if is_accelerate_available(): from accelerate.hooks import remove_hook_from_module else: raise ImportError("Please install accelerate via `pip install accelerate`") for model in [self.text_encoder, self.unet, self.movq]: if model is not None: remove_hook_from_module(model, recurse=True) self.unet_offload_hook = None self.text_encoder_offload_hook = None self.final_offload_hook = None def process_embeds(self, embeddings, attention_mask, cut_context): if cut_context: embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) max_seq_length = attention_mask.sum(-1).max() + 1 embeddings = embeddings[:, :max_seq_length] attention_mask = attention_mask[:, :max_seq_length] return embeddings, attention_mask @torch.no_grad() def encode_prompt( self, prompt, do_classifier_free_guidance=True, num_images_per_prompt=1, device=None, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, _cut_context=False, attention_mask: Optional[torch.FloatTensor] = None, negative_attention_mask: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. 
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
            negative_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
        """
        if prompt is not None and negative_prompt is not None:
            if type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )

        if device is None:
            device = self._execution_device

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        max_length = 128

        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids.to(device)
            attention_mask = text_inputs.attention_mask.to(device)
            prompt_embeds = self.text_encoder(
                text_input_ids,
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]
            prompt_embeds, attention_mask = self.process_embeds(prompt_embeds, attention_mask, _cut_context)
            prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2)

        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
        attention_mask = attention_mask.repeat(num_images_per_prompt, 1)
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]

            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            if negative_prompt is not None:
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=128,
                    truncation=True,
                    return_attention_mask=True,
                    return_tensors="pt",
                )
                text_input_ids = uncond_input.input_ids.to(device)
                negative_attention_mask = uncond_input.attention_mask.to(device)

                negative_prompt_embeds = self.text_encoder(
                    text_input_ids,
                    attention_mask=negative_attention_mask,
                )
                negative_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]]
                negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]]
                negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2)
            else:
                negative_prompt_embeds = torch.zeros_like(prompt_embeds)
                negative_attention_mask = torch.zeros_like(attention_mask)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
            if negative_prompt_embeds.shape != prompt_embeds.shape:
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
                negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
        else:
            negative_prompt_embeds = None
            negative_attention_mask = None
        return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def check_inputs(
        self,
        prompt,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        attention_mask=None,
        negative_attention_mask=None,
    ):
        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
        if negative_prompt_embeds is not None and negative_attention_mask is None:
            raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`")

        if negative_prompt_embeds is not None and negative_attention_mask is not None:
            if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape:
                raise ValueError(
                    "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but"
                    f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`"
                    f" {negative_attention_mask.shape}."
                )

        if prompt_embeds is not None and attention_mask is None:
            raise ValueError("Please provide `attention_mask` along with `prompt_embeds`")

        if prompt_embeds is not None and attention_mask is not None:
            if prompt_embeds.shape[:2] != attention_mask.shape:
                raise ValueError(
                    "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`"
                    f" {attention_mask.shape}."
                )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 25,
        guidance_scale: float = 3.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        height: Optional[int] = 1024,
        width: Optional[int] = 1024,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        negative_attention_mask: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        latents=None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 3.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            height (`int`, *optional*, defaults to 1024):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 1024):
                The width in pixels of the generated image.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
            negative_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents:
                torch.FloatTensor)`. Deprecated in favor of `callback_on_step_end`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step. Deprecated in favor of `callback_on_step_end`.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`

        """
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        cut_context = True
        device = self._execution_device

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
            attention_mask,
            negative_attention_mask,
        )

        self._guidance_scale = guidance_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # 2. Encode input prompt
        prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt(
            prompt,
            self.do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            _cut_context=cut_context,
            attention_mask=attention_mask,
            negative_attention_mask=negative_attention_mask,
        )

        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool()

        # 3. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 4. Prepare latents
        height, width = downscale_height_and_width(height, width, 8)
        latents = self.prepare_latents(
            (batch_size * num_images_per_prompt, 4, height, width),
            prompt_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
            self.text_encoder_offload_hook.offload()
        # 5. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    encoder_attention_mask=attention_mask,
                    return_dict=False,
                )[0]
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    # equals noise_pred_uncond + (guidance_scale + 1.0) * (noise_pred_text - noise_pred_uncond),
                    # i.e. conventional classifier-free guidance with an effective scale of guidance_scale + 1
                    noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred,
                    t,
                    latents,
                    generator=generator,
                ).prev_sample

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                    attention_mask = callback_outputs.pop("attention_mask", attention_mask)
                    negative_attention_mask = callback_outputs.pop(
                        "negative_attention_mask", negative_attention_mask
                    )

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        # post-processing
        if output_type not in ["pt", "np", "pil", "latent"]:
            raise ValueError(
                f"Only the output types `pt`, `pil`, `np` and `latent` are supported, not output_type={output_type}"
            )
        if not output_type == "latent":
            image = self.movq.decode(latents, force_not_quantize=True)["sample"]

            if output_type in ["np", "pil"]:
                image = image * 0.5 + 0.5
                image = image.clamp(0, 1)
                image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)
        else:
            image = latents

        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
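
# ---------------------------------------------------------------------------
# Sketch: two quick standalone checks of the numerics above (plain torch
# tensors, no diffusers objects needed). First, the guidance combination used
# in the denoising loop, (g + 1) * text - g * uncond, is ordinary
# classifier-free guidance uncond + w * (text - uncond) with an effective
# scale of w = g + 1. Second, `downscale_height_and_width` rounds the
# requested pixel size up to a multiple of scale_factor**2 = 64 and divides
# by the movq's 8x spatial compression, yielding the latent resolution.
import torch as _torch

_g = 3.0
_uncond = _torch.randn(2, 4, 8, 8)
_text = _torch.randn(2, 4, 8, 8)
assert _torch.allclose(
    (_g + 1.0) * _text - _g * _uncond,
    _uncond + (_g + 1.0) * (_text - _uncond),
    atol=1e-6,
)

assert downscale_height_and_width(1024, 1024) == (128, 128)
assert downscale_height_and_width(1000, 1000) == (128, 128)  # 1000/64 rounded up to 16
# ---------------------------------------------------------------------------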
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
from typing import Any, Dict, List, Optional, Union

import flax
import numpy as np
import PIL.Image
from flax.core.frozen_dict import FrozenDict
from huggingface_hub import create_repo, snapshot_download
from huggingface_hub.utils import validate_hf_hub_args
from PIL import Image
from tqdm.auto import tqdm

from ..configuration_utils import ConfigMixin
from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin
from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin
from ..utils import (
    CONFIG_NAME,
    BaseOutput,
    PushToHubMixin,
    http_user_agent,
    is_transformers_available,
    logging,
)


if is_transformers_available():
    from transformers import FlaxPreTrainedModel

INDEX_FILE = "diffusion_flax_model.bin"


logger = logging.get_logger(__name__)


LOADABLE_CLASSES = {
    "diffusers": {
        "FlaxModelMixin": ["save_pretrained", "from_pretrained"],
        "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"],
        "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"],
    },
    "transformers": {
        "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
        "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
        "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"],
        "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
        "ProcessorMixin": ["save_pretrained", "from_pretrained"],
        "ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
    },
}

ALL_IMPORTABLE_CLASSES = {}
for library in LOADABLE_CLASSES:
    ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])


def import_flax_or_no_model(module, class_name):
    try:
        # 1. First make sure that if a Flax object is present, import this one
        class_obj = getattr(module, "Flax" + class_name)
    except AttributeError:
        # 2. If this doesn't work, it's not a model and we don't append "Flax".
        # Note: a second `except AttributeError` on the same `try` would be
        # unreachable, so the fallback lookup gets its own try/except.
        try:
            class_obj = getattr(module, class_name)
        except AttributeError:
            raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}")

    return class_obj


@flax.struct.dataclass
class FlaxImagePipelineOutput(BaseOutput):
    """
    Output class for image pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]


class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
    r"""
    Base class for Flax-based pipelines.

    [`FlaxDiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and
    provides methods for loading, downloading and saving models. It also includes methods to:

        - enable/disable the progress bar for the denoising iteration

    Class attributes:

        - **config_name** ([`str`]) -- The configuration filename that stores the class and module names of all the
          diffusion pipeline's components.
""" config_name = "model_index.json" def register_modules(self, **kwargs): # import it here to avoid circular import from diffusers import pipelines for name, module in kwargs.items(): if module is None: register_dict = {name: (None, None)} else: # retrieve library library = module.__module__.split(".")[0] # check if the module is a pipeline module pipeline_dir = module.__module__.split(".")[-2] path = module.__module__.split(".") is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) # if library is not in LOADABLE_CLASSES, then it is a custom module. # Or if it's a pipeline module, then the module is inside the pipeline # folder so we set the library to module name. if library not in LOADABLE_CLASSES or is_pipeline_module: library = pipeline_dir # retrieve class_name class_name = module.__class__.__name__ register_dict = {name: (library, class_name)} # save model index config self.register_to_config(**register_dict) # set models setattr(self, name, module) def save_pretrained( self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict], push_to_hub: bool = False, **kwargs, ): # TODO: handle inference_state """ Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its class implements both a save and loading method. The pipeline is easily reloaded using the [`~FlaxDiffusionPipeline.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
""" self.save_config(save_directory) model_index_dict = dict(self.config) model_index_dict.pop("_class_name") model_index_dict.pop("_diffusers_version") model_index_dict.pop("_module", None) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", False) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id for pipeline_component_name in model_index_dict.keys(): sub_model = getattr(self, pipeline_component_name) if sub_model is None: # edge case for saving a pipeline with safety_checker=None continue model_cls = sub_model.__class__ save_method_name = None # search for the model's base class in LOADABLE_CLASSES for library_name, library_classes in LOADABLE_CLASSES.items(): library = importlib.import_module(library_name) for base_class, save_load_methods in library_classes.items(): class_candidate = getattr(library, base_class, None) if class_candidate is not None and issubclass(model_cls, class_candidate): # if we found a suitable base class in LOADABLE_CLASSES then grab its save method save_method_name = save_load_methods[0] break if save_method_name is not None: break save_method = getattr(sub_model, save_method_name) expects_params = "params" in set(inspect.signature(save_method).parameters.keys()) if expects_params: save_method( os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name] ) else: save_method(os.path.join(save_directory, pipeline_component_name)) if push_to_hub: self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, ) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a Flax-based diffusion pipeline from pretrained pipeline weights. The pipeline is set in evaluation mode (`model.eval()) by default and dropout modules are deactivated. If you get the error message below, you need to finetune the weights for your downstream task: ``` Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: ``` Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved using [`~FlaxDiffusionPipeline.save_pretrained`]. dtype (`str` or `jnp.dtype`, *optional*): Override the default `jnp.dtype` and load the model under this dtype. If `"auto"`, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components) of the specific pipeline class. The overwritten components are passed directly to the pipelines `__init__` method. <Tip> To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `huggingface-cli login`. </Tip> Examples: ```py >>> from diffusers import FlaxDiffusionPipeline >>> # Download pipeline from huggingface.co and cache. >>> # Requires to be logged in to Hugging Face hub, >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", ... revision="bf16", ... dtype=jnp.bfloat16, ... ) >>> # Download pipeline, but use a different scheduler >>> from diffusers import FlaxDPMSolverMultistepScheduler >>> model_id = "runwayml/stable-diffusion-v1-5" >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... ) >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( ... model_id, revision="bf16", dtype=jnp.bfloat16, scheduler=dpmpp ... ) >>> dpm_params["scheduler"] = dpmpp_state ``` """ cache_dir = kwargs.pop("cache_dir", None) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) from_pt = kwargs.pop("from_pt", False) use_memory_efficient_attention = kwargs.pop("use_memory_efficient_attention", False) split_head_dim = kwargs.pop("split_head_dim", False) dtype = kwargs.pop("dtype", None) # 1. 
Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained if not os.path.isdir(pretrained_model_name_or_path): config_dict = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, ) # make sure we only download sub-folders and `diffusers` filenames folder_names = [k for k in config_dict.keys() if not k.startswith("_")] allow_patterns = [os.path.join(k, "*") for k in folder_names] allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name] ignore_patterns = ["*.bin", "*.safetensors"] if not from_pt else [] ignore_patterns += ["*.onnx", "*.onnx_data", "*.xml", "*.pb"] if cls != FlaxDiffusionPipeline: requested_pipeline_class = cls.__name__ else: requested_pipeline_class = config_dict.get("_class_name", cls.__name__) requested_pipeline_class = ( requested_pipeline_class if requested_pipeline_class.startswith("Flax") else "Flax" + requested_pipeline_class ) user_agent = {"pipeline_class": requested_pipeline_class} user_agent = http_user_agent(user_agent) # download all allow_patterns cached_folder = snapshot_download( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, user_agent=user_agent, ) else: cached_folder = pretrained_model_name_or_path config_dict = cls.load_config(cached_folder) # 2. Load the pipeline class, if using custom module then load it from the hub # if we load from explicit class, let's use it if cls != FlaxDiffusionPipeline: pipeline_class = cls else: diffusers_module = importlib.import_module(cls.__module__.split(".")[0]) class_name = ( config_dict["_class_name"] if config_dict["_class_name"].startswith("Flax") else "Flax" + config_dict["_class_name"] ) pipeline_class = getattr(diffusers_module, class_name) # some modules can be passed directly to the init # in this case they are already instantiated in `kwargs` # extract them here expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) # define init kwargs init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} init_kwargs = {**init_kwargs, **passed_pipe_kwargs} # remove `null` components def load_module(name, value): if value[0] is None: return False if name in passed_class_obj and passed_class_obj[name] is None: return False return True init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} # Throw nice warnings / errors for fast accelerate loading if len(unused_kwargs) > 0: logger.warning( f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." ) # inference_params params = {} # import it here to avoid circular import from diffusers import pipelines # 3. 
Load each module in the pipeline for name, (library_name, class_name) in init_dict.items(): if class_name is None: # edge case for when the pipeline was saved with safety_checker=None init_kwargs[name] = None continue is_pipeline_module = hasattr(pipelines, library_name) loaded_sub_model = None sub_model_should_be_defined = True # if the model is in a pipeline module, then we load it from the pipeline if name in passed_class_obj: # 1. check that passed_class_obj has correct parent class if not is_pipeline_module: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) importable_classes = LOADABLE_CLASSES[library_name] class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} expected_class_obj = None for class_name, class_candidate in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): expected_class_obj = class_candidate if not issubclass(passed_class_obj[name].__class__, expected_class_obj): raise ValueError( f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be" f" {expected_class_obj}" ) elif passed_class_obj[name] is None: logger.warning( f"You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note" f" that this might lead to problems when using {pipeline_class} and is not recommended." ) sub_model_should_be_defined = False else: logger.warning( f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" " has the correct type" ) # set passed class object loaded_sub_model = passed_class_obj[name] elif is_pipeline_module: pipeline_module = getattr(pipelines, library_name) class_obj = import_flax_or_no_model(pipeline_module, class_name) importable_classes = ALL_IMPORTABLE_CLASSES class_candidates = {c: class_obj for c in importable_classes.keys()} else: # else we just import it from the library. library = importlib.import_module(library_name) class_obj = import_flax_or_no_model(library, class_name) importable_classes = LOADABLE_CLASSES[library_name] class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} if loaded_sub_model is None and sub_model_should_be_defined: load_method_name = None for class_name, class_candidate in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): load_method_name = importable_classes[class_name][1] load_method = getattr(class_obj, load_method_name) # check if the module is in a subdirectory if os.path.isdir(os.path.join(cached_folder, name)): loadable_folder = os.path.join(cached_folder, name) else: loaded_sub_model = cached_folder if issubclass(class_obj, FlaxModelMixin): loaded_sub_model, loaded_params = load_method( loadable_folder, from_pt=from_pt, use_memory_efficient_attention=use_memory_efficient_attention, split_head_dim=split_head_dim, dtype=dtype, ) params[name] = loaded_params elif is_transformers_available() and issubclass(class_obj, FlaxPreTrainedModel): if from_pt: # TODO(Suraj): Fix this in Transformers. 
We should be able to use `_do_init=False` here loaded_sub_model = load_method(loadable_folder, from_pt=from_pt) loaded_params = loaded_sub_model.params del loaded_sub_model._params else: loaded_sub_model, loaded_params = load_method(loadable_folder, _do_init=False) params[name] = loaded_params elif issubclass(class_obj, FlaxSchedulerMixin): loaded_sub_model, scheduler_state = load_method(loadable_folder) params[name] = scheduler_state else: loaded_sub_model = load_method(loadable_folder) init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) # 4. Potentially add passed objects if expected missing_modules = set(expected_modules) - set(init_kwargs.keys()) passed_modules = list(passed_class_obj.keys()) if len(missing_modules) > 0 and missing_modules <= set(passed_modules): for module in missing_modules: init_kwargs[module] = passed_class_obj.get(module, None) elif len(missing_modules) > 0: passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs raise ValueError( f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." ) model = pipeline_class(**init_kwargs, dtype=dtype) return model, params @classmethod def _get_signature_keys(cls, obj): parameters = inspect.signature(obj.__init__).parameters required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) expected_modules = set(required_parameters.keys()) - {"self"} return expected_modules, optional_parameters @property def components(self) -> Dict[str, Any]: r""" The `self.components` property can be useful to run different pipelines with the same weights and configurations to not have to re-allocate memory. Examples: ```py >>> from diffusers import ( ... FlaxStableDiffusionPipeline, ... FlaxStableDiffusionImg2ImgPipeline, ... ) >>> text2img = FlaxStableDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jnp.bfloat16 ... ) >>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components) ``` Returns: A dictionary containing all the modules needed to initialize the pipeline. """ expected_modules, optional_parameters = self._get_signature_keys(self) components = { k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters } if set(components.keys()) != expected_modules: raise ValueError( f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" f" {expected_modules} to be defined, but {components} are defined." ) return components @staticmethod def numpy_to_pil(images): """ Convert a NumPy image or a batch of images to a PIL image. """ if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype("uint8") if images.shape[-1] == 1: # special case for grayscale (single channel) images pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] else: pil_images = [Image.fromarray(image) for image in images] return pil_images # TODO: make it compatible with jax.lax def progress_bar(self, iterable): if not hasattr(self, "_progress_bar_config"): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError( f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." 
) return tqdm(iterable, **self._progress_bar_config) def set_progress_bar_config(self, **kwargs): self._progress_bar_config = kwargs
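# --- Illustrative usage sketch (annotation, not part of the original module) ---
# A minimal round trip through the `save_pretrained` / `from_pretrained` pair documented above,
# assuming the Flax/JAX extras are installed. The checkpoint id follows the docstring example;
# the output directory is a placeholder.
if __name__ == "__main__":
    import jax.numpy as jnp

    from diffusers import FlaxStableDiffusionPipeline

    # Loading returns the pipeline object plus a separate `params` dict, because Flax modules
    # keep their weights outside the module instances.
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jnp.bfloat16
    )

    # `params` must be passed explicitly so each sub-model's weights are written to disk.
    pipeline.save_pretrained("./my-flax-pipeline", params=params)

    # The saved directory reloads exactly like a Hub checkpoint.
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("./my-flax-pipeline")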
diffusers/src/diffusers/pipelines/pipeline_flax_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/pipeline_flax_utils.py", "repo_id": "diffusers", "token_count": 12212 }
120
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...image_processor import VaeImageProcessor from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel from ...models.embeddings import get_timestep_embedding from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableUnCLIPPipeline >>> pipe = StableUnCLIPPipeline.from_pretrained( ... "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16 ... ) # TODO update model path >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> images = pipe(prompt).images >>> images[0].save("astronaut_horse.png") ``` """ class StableUnCLIPPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): """ Pipeline for text-to-image generation using stable unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: prior_tokenizer ([`CLIPTokenizer`]): A [`CLIPTokenizer`]. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen [`CLIPTextModelWithProjection`] text-encoder. prior ([`PriorTransformer`]): The canonincal unCLIP prior to approximate the image embedding from the text embedding. prior_scheduler ([`KarrasDiffusionSchedulers`]): Scheduler used in the prior denoising process. image_normalizer ([`StableUnCLIPImageNormalizer`]): Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image embeddings after the noise has been applied. image_noising_scheduler ([`KarrasDiffusionSchedulers`]): Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined by the `noise_level`. tokenizer ([`CLIPTokenizer`]): A [`CLIPTokenizer`]. 
text_encoder ([`CLIPTextModel`]): Frozen [`CLIPTextModel`] text-encoder. unet ([`UNet2DConditionModel`]): A [`UNet2DConditionModel`] to denoise the encoded image latents. scheduler ([`KarrasDiffusionSchedulers`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. """ _exclude_from_cpu_offload = ["prior", "image_normalizer"] model_cpu_offload_seq = "text_encoder->prior_text_encoder->unet->vae" # prior components prior_tokenizer: CLIPTokenizer prior_text_encoder: CLIPTextModelWithProjection prior: PriorTransformer prior_scheduler: KarrasDiffusionSchedulers # image noising components image_normalizer: StableUnCLIPImageNormalizer image_noising_scheduler: KarrasDiffusionSchedulers # regular denoising components tokenizer: CLIPTokenizer text_encoder: CLIPTextModel unet: UNet2DConditionModel scheduler: KarrasDiffusionSchedulers vae: AutoencoderKL def __init__( self, # prior components prior_tokenizer: CLIPTokenizer, prior_text_encoder: CLIPTextModelWithProjection, prior: PriorTransformer, prior_scheduler: KarrasDiffusionSchedulers, # image noising components image_normalizer: StableUnCLIPImageNormalizer, image_noising_scheduler: KarrasDiffusionSchedulers, # regular denoising components tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, # vae vae: AutoencoderKL, ): super().__init__() self.register_modules( prior_tokenizer=prior_tokenizer, prior_text_encoder=prior_text_encoder, prior=prior, prior_scheduler=prior_scheduler, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, vae=vae, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_slicing() # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder def _encode_prior_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.prior_tokenizer( prompt, padding="max_length", max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.prior_tokenizer.batch_decode( untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.prior_tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length] prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) prompt_embeds = prior_text_encoder_output.text_embeds text_enc_hid_states = prior_text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.prior_tokenizer( uncond_tokens, padding="max_length", max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder( uncond_input.input_ids.to(device) ) negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_enc_hid_states, text_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs with prepare_extra_step_kwargs->prepare_prior_extra_step_kwargs, scheduler->prior_scheduler def prepare_prior_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the prior_scheduler step, since not all prior_schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other prior_schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the prior_scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, noise_level, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." ) if prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." ) if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError( f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." 
) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def noise_image_embeddings( self, image_embeds: torch.Tensor, noise_level: int, noise: Optional[torch.FloatTensor] = None, generator: Optional[torch.Generator] = None, ): """ Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher `noise_level` increases the variance in the final un-noised images. The noise is applied in two ways: 1. A noise schedule is applied directly to the embeddings. 2. A vector of sinusoidal time embeddings are appended to the output. In both cases, the amount of noise is controlled by the same `noise_level`. The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. """ if noise is None: noise = randn_tensor( image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype ) noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) self.image_normalizer.to(image_embeds.device) image_embeds = self.image_normalizer.scale(image_embeds) image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) image_embeds = self.image_normalizer.unscale(image_embeds) noise_level = get_timestep_embedding( timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 ) # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, # but we might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. noise_level = noise_level.to(image_embeds.dtype) image_embeds = torch.cat((image_embeds, noise_level), 1) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, # regular denoising process args prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 20, guidance_scale: float = 10.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, noise_level: int = 0, # prior args prior_num_inference_steps: int = 25, prior_guidance_scale: float = 4.0, prior_latents: Optional[torch.FloatTensor] = None, clip_skip: Optional[int] = None, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. 
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 10.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). noise_level (`int`, *optional*, defaults to `0`): The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps in the prior denoising process. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
prior_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. prior_latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image embedding generation in the prior denoising process. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_steps=callback_steps, noise_level=noise_level, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. prior_do_classifier_free_guidance = prior_guidance_scale > 1.0 # 3. Encode input prompt prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=prior_do_classifier_free_guidance, ) # 4. Prepare prior timesteps self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps # 5. Prepare prior latent variables embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prior_prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta) # 7. 
Prior denoising loop for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t) predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prior_prompt_embeds, encoder_hidden_states=prior_text_encoder_hidden_states, attention_mask=prior_text_mask, ).predicted_image_embedding if prior_do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, **prior_extra_step_kwargs, return_dict=False, )[0] if callback is not None and i % callback_steps == 0: callback(i, t, prior_latents) prior_latents = self.prior.post_process_latents(prior_latents) image_embeds = prior_latents # done prior # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 8. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 9. Prepare image embeddings image_embeds = self.noise_image_embeddings( image_embeds=image_embeds, noise_level=noise_level, generator=generator, ) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeds) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) # 10. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 11. Prepare latent variables num_channels_latents = self.unet.config.in_channels shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) latents = self.prepare_latents( shape=shape, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, scheduler=self.scheduler, ) # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 13. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, class_labels=image_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
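# --- Illustrative usage sketch (annotation, not part of the original module) ---
# A minimal text-to-image call highlighting the stable-unCLIP-specific arguments of `__call__`:
# `prior_num_inference_steps` for the prior denoising loop and `noise_level` for the
# image-embedding noising step. The checkpoint id mirrors EXAMPLE_DOC_STRING; the values are
# placeholders, not tuned recommendations.
if __name__ == "__main__":
    pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    image = pipe(
        "a photo of an astronaut riding a horse on mars",
        num_inference_steps=20,
        prior_num_inference_steps=25,
        noise_level=0,  # larger values add more variance via `noise_image_embeddings`
    ).images[0]
    image.save("astronaut_horse.png")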
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py", "repo_id": "diffusers", "token_count": 19983 }
121
import numpy as np
import torch

from ...utils import is_invisible_watermark_available


if is_invisible_watermark_available():
    from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
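# --- Illustrative usage sketch (annotation, not part of the original module) ---
# Watermarks a small fake batch in the [-1, 1], NCHW layout that `apply_watermark` expects.
# Requires the optional `invisible-watermark` dependency; the random tensor stands in for
# decoded VAE output.
if __name__ == "__main__":
    example_images = torch.rand(2, 3, 512, 512) * 2 - 1

    watermarker = StableDiffusionXLWatermarker()
    watermarked = watermarker.apply_watermark(example_images)

    # Images narrower than 256 pixels are returned unchanged (see the early return above).
    assert watermarked.shape == example_images.shape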
diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py", "repo_id": "diffusers", "token_count": 509 }
122
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )

    _dummy_objects.update(
        {"ImageTextPipelineOutput": ImageTextPipelineOutput, "UniDiffuserPipeline": UniDiffuserPipeline}
    )
else:
    _import_structure["modeling_text_decoder"] = ["UniDiffuserTextDecoder"]
    _import_structure["modeling_uvit"] = ["UniDiffuserModel", "UTransformer2DModel"]
    _import_structure["pipeline_unidiffuser"] = ["ImageTextPipelineOutput", "UniDiffuserPipeline"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import (
            ImageTextPipelineOutput,
            UniDiffuserPipeline,
        )
    else:
        from .modeling_text_decoder import UniDiffuserTextDecoder
        from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
        from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
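# --- Illustrative usage sketch (annotation, not part of the original module) ---
# With the `_LazyModule` branch above, importing the subpackage is cheap; the heavy
# torch/transformers modules listed in `_import_structure` are only loaded when a name is
# first accessed.
if __name__ == "__main__":
    from diffusers.pipelines import unidiffuser

    # Attribute access triggers the deferred import declared in `_import_structure`.
    print(unidiffuser.UniDiffuserPipeline.__name__)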
diffusers/src/diffusers/pipelines/unidiffuser/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/__init__.py", "repo_id": "diffusers", "token_count": 733 }
123
# Copyright 2023 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin @dataclass class KarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor derivative: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None class KarrasVeScheduler(SchedulerMixin, ConfigMixin): """ A stochastic scheduler tailored to variance-expanding models. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. <Tip> For more details on the parameters, see [Appendix E](https://arxiv.org/abs/2206.00364). The grid search values used to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of the paper. </Tip> Args: sigma_min (`float`, defaults to 0.02): The minimum noise magnitude. sigma_max (`float`, defaults to 100): The maximum noise magnitude. s_noise (`float`, defaults to 1.007): The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`, defaults to 80): The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`, defaults to 0.05): The start value of the sigma range to add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`, defaults to 50): The end value of the sigma range to add noise. A reasonable range is [0.2, 80]. 
""" order = 2 @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): # standard deviation of the initial noise distribution self.init_noise_sigma = sigma_max # setable values self.num_inference_steps: int = None self.timesteps: np.IntTensor = None self.schedule: torch.FloatTensor = None # sigma(t_i) def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) def add_noise_to_input( self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i ≥ 0` to reach a higher noise level `sigma_hat = sigma_i + gamma_i*sigma_i`. Args: sample (`torch.FloatTensor`): The input sample. sigma (`float`): generator (`torch.Generator`, *optional*): A random number generator. """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. sigma_hat (`float`): sigma_prev (`float`): sample_hat (`torch.FloatTensor`): return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def step_correct( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Corrects the predicted sample based on the `model_output` of the network. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.FloatTensor`): TODO sample_prev (`torch.FloatTensor`): TODO derivative (`torch.FloatTensor`): TODO return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def add_noise(self, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py", "repo_id": "diffusers", "token_count": 4090 }
124
# Copyright 2023 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class RePaintSchedulerOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor pred_original_sample: torch.FloatTensor # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class RePaintScheduler(SchedulerMixin, ConfigMixin): """ `RePaintScheduler` is a scheduler for DDPM inpainting inside a given mask. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. 
beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, `squaredcos_cap_v2`, or `sigmoid`. eta (`float`): The weight of the added noise in a diffusion step. A value of 0.0 corresponds to a DDIM step and a value of 1.0 to a DDPM step. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. clip_sample (`bool`, defaults to `True`): Clip the predicted sample between -1 and 1 for numerical stability. """ order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", eta: float = 0.0, trained_betas: Optional[np.ndarray] = None, clip_sample: bool = True, ): if trained_betas is not None: self.betas = torch.from_numpy(trained_betas) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == "sigmoid": # GeoDiff sigmoid schedule betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) self.final_alpha_cumprod = torch.tensor(1.0) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.eta = eta def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def set_timesteps( self, num_inference_steps: int, jump_length: int = 10, jump_n_sample: int = 10, device: Union[str, torch.device] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. jump_length (`int`, defaults to 10): The number of steps taken forward in time before going backward in time for a single jump (“j” in RePaint paper). Take a look at Figures 9 and 10 in the paper. jump_n_sample (`int`, defaults to 10): The number of times to make a forward time jump for a given chosen time sample. Take a look at Figures 9 and 10 in the paper. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
""" num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) self.num_inference_steps = num_inference_steps timesteps = [] jumps = {} for j in range(0, num_inference_steps - jump_length, jump_length): jumps[j] = jump_n_sample - 1 t = num_inference_steps while t >= 1: t = t - 1 timesteps.append(t) if jumps.get(t, 0) > 0: jumps[t] = jumps[t] - 1 for _ in range(jump_length): t = t + 1 timesteps.append(t) timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps) self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t): prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get # previous sample x_{t-1} ~ N(pred_prev_sample, variance) == add # variance to pred_sample # Is equivalent to formula (16) in https://arxiv.org/pdf/2010.02502.pdf # without eta. # variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, original_image: torch.FloatTensor, mask: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[RePaintSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. original_image (`torch.FloatTensor`): The original image to inpaint on. mask (`torch.FloatTensor`): The mask where a value of 0.0 indicates which part of the original image to inpaint. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ t = timestep prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 # 3. 
Clip "predicted x_0" if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) # We choose to follow RePaint Algorithm 1 to get x_{t-1}, however we # substitute formula (7) in the algorithm coming from DDPM paper # (formula (4) Algorithm 2 - Sampling) with formula (12) from DDIM paper. # DDIM schedule gives the same results as DDPM with eta = 1.0 # Noise is being reused in 7. and 8., but no impact on quality has # been observed. # 5. Add noise device = model_output.device noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) std_dev_t = self.eta * self._get_variance(timestep) ** 0.5 variance = 0 if t > 0 and self.eta > 0: variance = std_dev_t * noise # 6. compute "direction pointing to x_t" of formula (12) # from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. compute x_{t-1} of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_unknown_part = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction + variance # 8. Algorithm 1 Line 5 https://arxiv.org/pdf/2201.09865.pdf prev_known_part = (alpha_prod_t_prev**0.5) * original_image + ((1 - alpha_prod_t_prev) ** 0.5) * noise # 9. Algorithm 1 Line 8 https://arxiv.org/pdf/2201.09865.pdf pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part if not return_dict: return ( pred_prev_sample, pred_original_sample, ) return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) def undo_step(self, sample, timestep, generator=None): n = self.config.num_train_timesteps // self.num_inference_steps for i in range(n): beta = self.betas[timestep + i] if sample.device.type == "mps": # randn does not work reproducibly on mps noise = randn_tensor(sample.shape, dtype=sample.dtype, generator=generator) noise = noise.to(sample.device) else: noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) # 10. Algorithm 1 Line 10 https://arxiv.org/pdf/2201.09865.pdf sample = (1 - beta) ** 0.5 * sample + beta**0.5 * noise return sample def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: raise NotImplementedError("Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.") def __len__(self): return self.config.num_train_timesteps
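The schedule produced by `set_timesteps` repeatedly walks backwards in time and then jumps forwards again, so the sampling loop has to look at the direction of each move: a backwards move is a denoising `step` (which re-imposes the known region through the mask), a forwards move is an `undo_step` that re-noises the sample. The sketch below roughly follows the loop of the corresponding RePaint pipeline; the untrained `UNet2DModel` and the random image and mask tensors are placeholders for illustration only.

import torch
from diffusers import RePaintScheduler, UNet2DModel

unet = UNet2DModel(sample_size=64, in_channels=3, out_channels=3)  # placeholder, untrained
scheduler = RePaintScheduler()

original_image = torch.randn(1, 3, 64, 64)  # the image to be inpainted (placeholder)
mask = torch.ones(1, 3, 64, 64)             # 1.0 = keep pixel, 0.0 = inpaint pixel
generator = torch.manual_seed(0)

scheduler.set_timesteps(num_inference_steps=250, jump_length=10, jump_n_sample=10)
image = torch.randn(original_image.shape, generator=generator)

t_last = scheduler.timesteps[0] + 1
for t in scheduler.timesteps:
    if t < t_last:
        # moving backwards in time: denoise and keep the known region (Algorithm 1, lines 4-9)
        model_output = unet(image, t).sample
        image = scheduler.step(model_output, t, image, original_image, mask, generator).prev_sample
    else:
        # moving forwards in time: re-noise the sample (Algorithm 1, line 10)
        image = scheduler.undo_step(image, t_last, generator)
    t_last = t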
diffusers/src/diffusers/schedulers/scheduling_repaint.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_repaint.py", "repo_id": "diffusers", "token_count": 6516 }
125
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class FlaxControlNetModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxModelMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxUNet2DConditionModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxAutoencoderKL(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxDiffusionPipeline(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxDDIMScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxDDPMScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxEulerDiscreteScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxKarrasVeScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxLMSDiscreteScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def 
from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxPNDMScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxSchedulerMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"]) class FlaxScoreSdeVeScheduler(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["flax"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["flax"])
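These placeholder classes exist so that `from diffusers import FlaxUNet2DConditionModel` (and the other Flax names) never fails at import time; the failure is deferred to the first attempt to instantiate the class or call `from_config`/`from_pretrained`, where `requires_backends` raises an `ImportError` explaining that Flax must be installed. A small illustration of that behaviour, assuming an environment where the `flax` backend is not installed:

# Illustration only: assumes `flax` is NOT installed, so diffusers re-exports the
# dummy classes defined above under the real names.
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available

print("flax available:", is_flax_available())  # False in this scenario

try:
    # Instantiation (not the import) is what triggers requires_backends.
    FlaxUNet2DConditionModel()
except ImportError as err:
    print("deferred failure:", err)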
diffusers/src/diffusers/utils/dummy_flax_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_flax_objects.py", "repo_id": "diffusers", "token_count": 2343 }
126
# coding=utf-8 # Copyright 2023 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities.""" import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Dict, Optional from tqdm import auto as tqdm_lib _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING _tqdm_active = True def _get_default_logging_level() -> int: """ If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to `_default_log_level` """ env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_log_levels_dict() -> Dict[str, int]: return log_levels def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom diffusers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Diffusers' root logger as an `int`. 
Returns: `int`: Logging level integers which can be one of: - `50`: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` - `40`: `diffusers.logging.ERROR` - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN` - `20`: `diffusers.logging.INFO` - `10`: `diffusers.logging.DEBUG` """ _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the verbosity level for the 🤗 Diffusers' root logger. Args: verbosity (`int`): Logging level which can be one of: - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` - `diffusers.logging.ERROR` - `diffusers.logging.WARNING` or `diffusers.logging.WARN` - `diffusers.logging.INFO` - `diffusers.logging.DEBUG` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info() -> None: """Set the verbosity to the `INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning() -> None: """Set the verbosity to the `WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug() -> None: """Set the verbosity to the `DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error() -> None: """Set the verbosity to the `ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the 🤗 Diffusers' root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the 🤗 Diffusers' root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: """adds a handler to the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """removes given handler from the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None and handler in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every 🤗 Diffusers' logger. The explicit formatter is as follows: ``` [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE ``` All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for 🤗 Diffusers' loggers. All handlers currently bound to the root logger are affected by this method. 
""" handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None) def warning_advice(self, *args, **kwargs) -> None: """ This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed """ no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice class EmptyTqdm: """Dummy tqdm which doesn't do anything.""" def __init__(self, *args, **kwargs): # pylint: disable=unused-argument self._iterator = args[0] if args else None def __iter__(self): return iter(self._iterator) def __getattr__(self, _): """Return empty function.""" def empty_fn(*args, **kwargs): # pylint: disable=unused-argument return return empty_fn def __enter__(self): return self def __exit__(self, type_, value, traceback): return class _tqdm_cls: def __call__(self, *args, **kwargs): if _tqdm_active: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() tqdm = _tqdm_cls() def is_progress_bar_enabled() -> bool: """Return a boolean indicating whether tqdm progress bars are enabled.""" global _tqdm_active return bool(_tqdm_active) def enable_progress_bar() -> None: """Enable tqdm progress bar.""" global _tqdm_active _tqdm_active = True def disable_progress_bar() -> None: """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False
diffusers/src/diffusers/utils/logging.py/0
{ "file_path": "diffusers/src/diffusers/utils/logging.py", "repo_id": "diffusers", "token_count": 3614 }
127
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNet2DConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): dtype = jnp.bfloat16 if fp16 else jnp.float32 image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) return image def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): dtype = jnp.bfloat16 if fp16 else jnp.float32 revision = "bf16" if fp16 else None model, params = FlaxUNet2DConditionModel.from_pretrained( model_id, subfolder="unet", dtype=dtype, revision=revision ) return model, params def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): dtype = jnp.bfloat16 if fp16 else jnp.float32 hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice): model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) sample = model.apply( {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample assert sample.shape == latents.shape output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice): model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) sample = model.apply( {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample assert sample.shape == latents.shape output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) expected_output_slice = jnp.array(expected_slice, 
dtype=jnp.float32) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
diffusers/tests/models/unets/test_models_unet_2d_flax.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_2d_flax.py", "repo_id": "diffusers", "token_count": 2141 }
128
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import torch from diffusers import ( IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFPipeline params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @slow @require_torch_gpu class IFPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_all(self): # if pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe_2 = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None ) # pre compute text embeddings and remove T5 to save memory pipe_1.text_encoder.to("cuda") prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime 
turtle", device="cuda") del pipe_1.tokenizer del pipe_1.text_encoder gc.collect() pipe_1.tokenizer = None pipe_1.text_encoder = None pipe_1.enable_model_cpu_offload() pipe_2.enable_model_cpu_offload() pipe_1.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_2.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds) pipe_1.remove_all_hooks() pipe_2.remove_all_hooks() # img2img pipe_1 = IFImg2ImgPipeline(**pipe_1.components) pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components) pipe_1.enable_model_cpu_offload() pipe_2.enable_model_cpu_offload() pipe_1.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_2.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds) pipe_1.remove_all_hooks() pipe_2.remove_all_hooks() # inpainting pipe_1 = IFInpaintingPipeline(**pipe_1.components) pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components) pipe_1.enable_model_cpu_offload() pipe_2.enable_model_cpu_offload() pipe_1.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_2.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds) def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds): # pipeline 1 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe_1( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (64, 64, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(image, expected_image) # pipeline 2 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) output = pipe_2( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(image, expected_image) def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds): # pipeline 1 _start_torch_memory_measurement() image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) generator = torch.Generator(device="cpu").manual_seed(0) output = pipe_1( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (64, 64, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(image, expected_image) # pipeline 2 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) original_image = floats_tensor((1, 3, 256, 256), 
rng=random.Random(0)).to(torch_device) image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) output = pipe_2( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(image, expected_image) def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds): # pipeline 1 _start_torch_memory_measurement() image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device) generator = torch.Generator(device="cpu").manual_seed(0) output = pipe_1( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (64, 64, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(image, expected_image) # pipeline 2 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device) mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device) output = pipe_2( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(image, expected_image) def _start_torch_memory_measurement(): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
diffusers/tests/pipelines/deepfloyd_if/test_if.py/0
{ "file_path": "diffusers/tests/pipelines/deepfloyd_if/test_if.py", "repo_id": "diffusers", "token_count": 5218 }
129
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } 
return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyPriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = Dummies() return dummy.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummy = Dummies() return dummy.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py", "repo_id": "diffusers", "token_count": 3240 }
130
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, numpy_cosine_similarity_distance, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionDiffEditPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) inverse_scheduler = DDIMInverseScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): mask = floats_tensor((1, 16, 
16), rng=random.Random(seed)).to(device) latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_mask_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_mask(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_mask_inputs(device) mask = pipe.generate_mask(**inputs) mask_slice = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16)) expected_slice = np.array([0] * 9) max_diff = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) self.assertEqual(mask[0, -3, -4], 0) def test_inversion(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) 
inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5160, 0.5115, 0.5060, 0.5456, 0.4704, 0.5060, 0.5019, 0.4405, 0.4726], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_inversion_dpm(self): device = "cpu" components = self.get_dummy_components() scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"} components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args) components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args) pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5305, 0.4673, 0.5314, 0.5308, 0.4886, 0.5279, 0.5142, 0.4724, 0.4892], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @require_torch_gpu @nightly class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((256, 256)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_full(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.scheduler.clip_sample = True pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=5, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=5, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((256, 256)) ) / 255 ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 2e-1 @nightly @require_torch_gpu class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((768, 768)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_dpm(self): generator = 
torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768)) ) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py", "repo_id": "diffusers", "token_count": 7245 }
131
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device enable_full_determinism() @nightly @require_torch_gpu class StableDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_1(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_euler") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_2(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_euler") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1 def test_stable_diffusion_karras_sigmas(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_dpmpp_2m") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True, ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_noise_sampler_seed(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_dpmpp_sde") prompt = "A painting of a 
squirrel eating a burger" seed = 0 images1 = sd_pipe( [prompt], generator=torch.manual_seed(seed), noise_sampler_seed=seed, guidance_scale=9.0, num_inference_steps=20, output_type="np", ).images images2 = sd_pipe( [prompt], generator=torch.manual_seed(seed), noise_sampler_seed=seed, guidance_scale=9.0, num_inference_steps=20, output_type="np", ).images assert images1.shape == (1, 512, 512, 3) assert images2.shape == (1, 512, 512, 3) assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_k_diffusion/test_stable_diffusion_k_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_k_diffusion/test_stable_diffusion_k_diffusion.py", "repo_id": "diffusers", "token_count": 2114 }
132
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNet3DConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, is_flaky, nightly, numpy_cosine_similarity_distance, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = VideoToVideoSDPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} test_attention_slicing = False # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=True, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[ 8, ], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): # 3 frames video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def 
test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = VideoToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][0][-3:, -3:, -1] assert frames[0][0].shape == (32, 32, 3) expected_slice = np.array([0.6391, 0.5350, 0.5202, 0.5521, 0.5453, 0.5393, 0.6652, 0.5270, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @is_flaky() def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=0.001) @is_flaky() def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent() @is_flaky() def test_save_load_local(self): super().test_save_load_local() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3) # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_single_identical(self): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_num_images_per_prompt(self): pass def test_progress_bar(self): return super().test_progress_bar() @nightly @skip_mps class VideoToVideoSDPipelineSlowTests(unittest.TestCase): def test_two_step_model(self): pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() # 10 frames generator = torch.Generator(device="cpu").manual_seed(0) video = torch.randn((1, 10, 3, 320, 576), generator=generator) prompt = "Spiderman is surfing" generator = torch.Generator(device="cpu").manual_seed(0) video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="np").frames expected_array = np.array( [0.17114258, 0.13720703, 0.08886719, 0.14819336, 0.1730957, 0.24584961, 0.22021484, 0.35180664, 0.2607422] ) output_array = video_frames[0, 0, :3, :3, 0].flatten() assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-3
diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py/0
{ "file_path": "diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py", "repo_id": "diffusers", "token_count": 3466 }
133
# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class DDPMParallelSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDPMParallelScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_batch_step_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample1 = self.dummy_sample_deter sample2 = self.dummy_sample_deter + 0.1 sample3 = self.dummy_sample_deter - 0.1 per_sample_batch = sample1.shape[0] samples = torch.stack([sample1, sample2, sample3], dim=0) timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch) residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1)) pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1)) result_sum = torch.sum(torch.abs(pred_prev_sample)) result_mean = 
torch.mean(torch.abs(pred_prev_sample)) assert abs(result_sum.item() - 1153.1833) < 1e-2 assert abs(result_mean.item() - 0.5005) < 1e-3 def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ): scheduler.set_timesteps(timesteps=timesteps) def test_full_loop_with_noise(self): scheduler_class = 
self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) t_start = num_trained_timesteps - 2 model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 387.9466) < 1e-2, f" expected result sum 387.9466, but get {result_sum}" assert abs(result_mean.item() - 0.5051) < 1e-3, f" expected result mean 0.5051, but get {result_mean}"
diffusers/tests/schedulers/test_scheduler_ddpm_parallel.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ddpm_parallel.py", "repo_id": "diffusers", "token_count": 4272 }
134
import torch from diffusers import SASolverScheduler from diffusers.utils.testing_utils import require_torchsde, torch_device from .test_schedulers import SchedulerCommonTest @require_torchsde class SASolverSchedulerTest(SchedulerCommonTest): scheduler_classes = (SASolverScheduler,) forward_default_kwargs = (("num_inference_steps", 10),) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[ : max( scheduler.config.predictor_order, scheduler.config.corrector_order - 1, ) ] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) generator = torch.manual_seed(0) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t, generator=generator) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 337.394287109375) < 1e-2 assert abs(result_mean.item() - 0.43931546807289124) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 329.1999816894531) < 1e-2 assert abs(result_mean.item() - 0.4286458194255829) < 1e-3 else: print("None") def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = 
self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) generator = torch.manual_seed(0) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t, generator=generator) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 193.1467742919922) < 1e-2 assert abs(result_mean.item() - 0.2514931857585907) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 193.4154052734375) < 1e-2 assert abs(result_mean.item() - 0.2518429756164551) < 1e-3 else: print("None") def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma generator = torch.manual_seed(0) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 337.394287109375) < 1e-2 assert abs(result_mean.item() - 0.43931546807289124) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 337.394287109375) < 1e-2 assert abs(result_mean.item() - 0.4393154978752136) < 1e-3 else: print("None") def test_full_loop_device_karras_sigmas(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma sample = sample.to(torch_device) generator = torch.manual_seed(0) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu"]: assert abs(result_sum.item() - 837.2554931640625) < 1e-2 assert abs(result_mean.item() - 1.0901764631271362) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 837.25537109375) < 1e-2 assert abs(result_mean.item() - 1.0901763439178467) < 1e-2 else: print("None")
diffusers/tests/schedulers/test_scheduler_sasolver.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_sasolver.py", "repo_id": "diffusers", "token_count": 3629 }
135
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from collections import defaultdict def overwrite_file(file, class_name, test_name, correct_line, done_test): _id = f"{file}_{class_name}_{test_name}" done_test[_id] += 1 with open(file, "r") as f: lines = f.readlines() class_regex = f"class {class_name}(" test_regex = f"{4 * ' '}def {test_name}(" line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}" another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}" in_class = False in_func = False in_line = False insert_line = False count = 0 spaces = 0 new_lines = [] for line in lines: if line.startswith(class_regex): in_class = True elif in_class and line.startswith(test_regex): in_func = True elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)): spaces = len(line.split(correct_line.split()[0])[0]) count += 1 if count == done_test[_id]: in_line = True if in_class and in_func and in_line: if ")" not in line: continue else: insert_line = True if in_class and in_func and in_line and insert_line: new_lines.append(f"{spaces * ' '}{correct_line}") in_class = in_func = in_line = insert_line = False else: new_lines.append(line) with open(file, "w") as f: for line in new_lines: f.write(line) def main(correct, fail=None): if fail is not None: with open(fail, "r") as f: test_failures = {l.strip() for l in f.readlines()} else: test_failures = None with open(correct, "r") as f: correct_lines = f.readlines() done_tests = defaultdict(int) for line in correct_lines: file, class_name, test_name, correct_line = line.split(";") if test_failures is None or "::".join([file, class_name, test_name]) in test_failures: overwrite_file(file, class_name, test_name, correct_line, done_tests) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--correct_filename", help="filename of tests with expected result") parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None) args = parser.parse_args() main(args.correct_filename, args.fail_filename)
diffusers/utils/overwrite_expected_slice.py/0
{ "file_path": "diffusers/utils/overwrite_expected_slice.py", "repo_id": "diffusers", "token_count": 1259 }
136
# Hugging Face Diffusion Models Course [![License](https://img.shields.io/static/v1?label=License&message=Apache&color=<Yellow>)](https://github.com/huggingface/diffusion-models-class/blob/main/LICENSE) &nbsp; [![GitHub forks](https://img.shields.io/github/forks/huggingface/diffusion-models-class.svg?style=social&label=Fork&maxAge=2592000)](https://github.com/dhakalnirajan/diffusion-models-class) &nbsp; [![Made with Jupyter](https://img.shields.io/badge/Made%20with-Jupyter-red?style=flat-square&logo=Jupyter)](https://jupyter.org/try) &nbsp; ![PyTorch](https://img.shields.io/badge/PyTorch-%23EE4C2C.svg?style=flat-square&logo=PyTorch&logoColor=white) In this free course, you will: - 👩‍🎓 Study the theory behind diffusion models - 🧨 Learn how to generate images and audio with the popular 🤗 Diffusers library - 🏋️‍♂️ Train your own diffusion models from scratch - 📻 Fine-tune existing diffusion models on new datasets - 🗺 Explore conditional generation and guidance - 🧑‍🔬 Create your own custom diffusion model pipelines Register via the **[signup form](https://huggingface.us17.list-manage.com/subscribe?u=7f57e683fa28b51bfc493d048&id=ef963b4162)** and then join us on **[Discord](https://discord.gg/aYka4Yhff9)** to get the conversations started. Instructions on how to join specific categories/channels **[are here.](https://discord.com/channels/879548962464493619/1014509271255367701)** ## Syllabus | 📆 Publishing date | 📘 Unit | 👩‍💻 Hands-on | |---------------|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------| | November 28, 2022 | [An Introduction to Diffusion Models](https://github.com/huggingface/diffusion-models-class/tree/main/unit1)| Introduction to Diffusers and Diffusion Models From Scratch | | December 12, 2022 | [Fine-Tuning and Guidance](https://github.com/huggingface/diffusion-models-class/tree/main/unit2) | Fine-Tuning a Diffusion Model on New Data and Adding Guidance | | December 21, 2022 | [Stable Diffusion](https://github.com/huggingface/diffusion-models-class/tree/main/unit3) | Exploring a Powerful Text-Conditioned Latent Diffusion Model | | January 2023 (TBC) | [Doing More with Diffusion](https://github.com/huggingface/diffusion-models-class/tree/main/unit4) | Advanced Techniques to Take Diffusion Further | More information coming soon! ## Prerequisites - Good skills in Python 🐍 - Basics in Deep Learning and Pytorch If it's not the case yet, you can check these free resources: - Python: https://www.udacity.com/course/introduction-to-python--ud1110 - Intro to Deep Learning with PyTorch: https://www.udacity.com/course/deep-learning-pytorch--ud188 - PyTorch in 60 min: https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html ## FAQ **Is this class free?** Yes, totally free 🥳. **Do I need to have a Hugging Face account to follow the course?** Yes, to push your custom models and pipelines to the hub, you need an account (it's free) 🤗. You can create one here 👉 [https://huggingface.co/join](https://huggingface.co/join) **What’s the format of the class?** The course will consist of at least **4 Units.** More will be added as time goes on, on topics like diffusion for audio. Each unit consists of some theory and background alongside one or more hands-on notebooks. Some units will also contain suggested projects and we'll have competitions and swag for the best pipelines and demos (more details TDB). 
## 🌎 Languages and translations Members of the 🤗 community have begun translating the course! We're planning to host this course on the [Hugging Face website](https://huggingface.co/), so if you're interested in contributing a translation, we recommend waiting until we've formatted the English content in its final form. | Language | Authors | |:------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | [Chinese](https://github.com/darcula1993/diffusion-models-class-CN/blob/main/README_CN.md) | [@darcula1993](https://github.com/darcula1993) [@XhrLeokk](https://github.com/XhrLeokk) [@SuSung-boy](https://github.com/SuSung-boy) [@Hoi2022](https://github.com/Hoi2022)| | [Japanese](https://github.com/eltociear/diffusion-models-class-JA/blob/main/README_JA.md) | [@eltociear](https://github.com/eltociear) [@nazuki155](https://github.com/nazuki155)| | [Korean](https://github.com/deep-diver/diffusion-models-class/blob/main/README_KR.md) | [@deep-diver](https://github.com/deep-diver)
diffusion-models-class/README.md/0
{ "file_path": "diffusion-models-class/README.md", "repo_id": "diffusion-models-class", "token_count": 1535 }
137
<jupyter_start><jupyter_text>Diffusion Models from ScratchSometimes it is helpful to consider the simplest possible version of something to better understand how it works. We're going to try that in this notebook, beginning with a 'toy' diffusion model to see how the different pieces work, and then examining how they differ from a more complex implementation. We will look at - The corruption process (adding noise to data)- What a UNet is, and how to implement an extremely minimal one from scratch- Diffusion model training- Sampling theoryThen we'll compare our versions with the diffusers DDPM implementation, exploring- Improvements over our mini UNet- The DDPM noise schedule- Differences in training objective- Timestep conditioning- Sampling approachesThis notebook is fairly in-depth, and can safely be skipped if you're not excited about a from-scratch deep dive! It is also worth noting that most of the code here is for illustrative purposes, and I wouldn't recommend directly adopting any of it for your own work (unless you're just trying to improve on the examples shown here for learning purposes). Setup and Imports:<jupyter_code>!pip install -q diffusers import torch import torchvision from torch import nn from torch.nn import functional as F from torch.utils.data import DataLoader from diffusers import DDPMScheduler, UNet2DModel from matplotlib import pyplot as plt device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f'Using device: {device}')<jupyter_output>Using device: cuda<jupyter_text>The DataHere we're going to test things with a very small dataset: MNIST. If you'd like to give the model a slightly harder challenge without changing anything else, torchvision.datasets.FashionMNIST should work as a drop-in replacement.<jupyter_code>dataset = torchvision.datasets.MNIST(root="mnist/", train=True, download=True, transform=torchvision.transforms.ToTensor()) train_dataloader = DataLoader(dataset, batch_size=8, shuffle=True) x, y = next(iter(train_dataloader)) print('Input shape:', x.shape) print('Labels:', y) plt.imshow(torchvision.utils.make_grid(x)[0], cmap='Greys');<jupyter_output>Input shape: torch.Size([8, 1, 28, 28]) Labels: tensor([1, 9, 7, 3, 5, 2, 1, 4])<jupyter_text>Each image is a greyscale 28px by 28px drawing of a digit, with values ranging from 0 to 1. The Corruption ProcessPretend you haven't read any diffusion model papers, but you know the process involves adding noise. How would you do it?We probably want an easy way to control the amount of corruption. So what if we take in a parameter for the `amount` of noise to add, and then we do:`noise = torch.rand_like(x)` `noisy_x = (1-amount)*x + amount*noise`If amount = 0, we get back the input without any changes. If amount gets up to 1, we get back noise with no trace of the input x.
By mixing the input with noise this way, we keep the output in the same range (0 to 1).We can implement this fairly easily (just watch the shapes so you don't get burnt by broadcasting rules):<jupyter_code>def corrupt(x, amount): """Corrupt the input `x` by mixing it with noise according to `amount`""" noise = torch.rand_like(x) amount = amount.view(-1, 1, 1, 1) # Sort shape so broadcasting works return x*(1-amount) + noise*amount<jupyter_output><empty_output><jupyter_text>And looking at the results visually to see that it works as expected:<jupyter_code># Plotting the input data fig, axs = plt.subplots(2, 1, figsize=(12, 5)) axs[0].set_title('Input data') axs[0].imshow(torchvision.utils.make_grid(x)[0], cmap='Greys') # Adding noise amount = torch.linspace(0, 1, x.shape[0]) # Left to right -> more corruption noised_x = corrupt(x, amount) # Plotting the noised version axs[1].set_title('Corrupted data (-- amount increases -->)') axs[1].imshow(torchvision.utils.make_grid(noised_x)[0], cmap='Greys');<jupyter_output><empty_output><jupyter_text>As noise amount approaches one, our data begins to look like pure random noise. But for most noise amounts, you can guess the digit fairly well. Do you think this is optimal? The ModelWe'd like a model that takes in a 28px noisy image and outputs a prediction of the same shape. A popular choice here is an architecture called a UNet. [Originally invented for segmentation tasks in medical imagery](https://arxiv.org/abs/1505.04597), a UNet consists of a 'constricting path' through which data is compressed down and an 'expanding path' through which it expands back up to the original dimension (similar to an autoencoder) but also features skip connections that allow for information and gradients to flow across at different levels. Some UNets feature complex blocks at each stage, but for this toy demo we'll build a minimal example that takes in a one-channel image and passes it through three convolutional layers on the down path (the down_layers in the diagram and code) and three on the up path, with skip connections between the down and up layers. We'll use max pooling for downsampling and `nn.Upsample` for upsampling rather than relying on learnable layers like more complex UNets.
Here is the rough architecture showing the number of channels in the output of each layer: This is what that looks like in code:<jupyter_code>class BasicUNet(nn.Module): """A minimal UNet implementation.""" def __init__(self, in_channels=1, out_channels=1): super().__init__() self.down_layers = torch.nn.ModuleList([ nn.Conv2d(in_channels, 32, kernel_size=5, padding=2), nn.Conv2d(32, 64, kernel_size=5, padding=2), nn.Conv2d(64, 64, kernel_size=5, padding=2), ]) self.up_layers = torch.nn.ModuleList([ nn.Conv2d(64, 64, kernel_size=5, padding=2), nn.Conv2d(64, 32, kernel_size=5, padding=2), nn.Conv2d(32, out_channels, kernel_size=5, padding=2), ]) self.act = nn.SiLU() # The activation function self.downscale = nn.MaxPool2d(2) self.upscale = nn.Upsample(scale_factor=2) def forward(self, x): h = [] for i, l in enumerate(self.down_layers): x = self.act(l(x)) # Through the layer and the activation function if i < 2: # For all but the third (final) down layer: h.append(x) # Storing output for skip connection x = self.downscale(x) # Downscale ready for the next layer for i, l in enumerate(self.up_layers): if i > 0: # For all except the first up layer x = self.upscale(x) # Upscale x += h.pop() # Fetching stored output (skip connection) x = self.act(l(x)) # Through the layer and the activation function return x<jupyter_output><empty_output><jupyter_text>We can verify that the output shape is the same as the input, as we expect:<jupyter_code>net = BasicUNet() x = torch.rand(8, 1, 28, 28) net(x).shape<jupyter_output><empty_output><jupyter_text>This network has just over 300,000 parameters:<jupyter_code>sum([p.numel() for p in net.parameters()])<jupyter_output><empty_output><jupyter_text>You can explore changing the number of channels in each layer or swapping in different architectures if you want. Training the networkSo what should the model do, exactly? Again, there are various takes on this but for this demo let's pick a simple framing: given a corrupted input noisy_x the model should output its best guess for what the original x looks like. We will compare this to the actual value via the mean squared error.We can now have a go at training the network. - Get a batch of data- Corrupt it by random amounts- Feed it through the model- Compare the model predictions with the clean images to calculate our loss- Update the model's parameters accordingly.Feel free to modify this and see if you can get it working better!<jupyter_code># Dataloader (you can mess with batch size) batch_size = 128 train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True) # How many runs through the data should we do? n_epochs = 3 # Create the network net = BasicUNet() net.to(device) # Our loss function loss_fn = nn.MSELoss() # The optimizer opt = torch.optim.Adam(net.parameters(), lr=1e-3) # Keeping a record of the losses for later viewing losses = [] # The training loop for epoch in range(n_epochs): for x, y in train_dataloader: # Get some data and prepare the corrupted version x = x.to(device) # Data on the GPU noise_amount = torch.rand(x.shape[0]).to(device) # Pick random noise amounts noisy_x = corrupt(x, noise_amount) # Create our noisy x # Get the model prediction pred = net(noisy_x) # Calculate the loss loss = loss_fn(pred, x) # How close is the output to the true 'clean' x? 
# Backprop and update the params: opt.zero_grad() loss.backward() opt.step() # Store the loss for later losses.append(loss.item()) # Print out the average of the loss values for this epoch: avg_loss = sum(losses[-len(train_dataloader):])/len(train_dataloader) print(f'Finished epoch {epoch}. Average loss for this epoch: {avg_loss:05f}') # View the loss curve plt.plot(losses) plt.ylim(0, 0.1);<jupyter_output>Finished epoch 0. Average loss for this epoch: 0.026736 Finished epoch 1. Average loss for this epoch: 0.020692 Finished epoch 2. Average loss for this epoch: 0.018887<jupyter_text>We can try to see what the model predictions look like by grabbing a batch of data, corrupting it by different amounts and then seeing the model's predictions:<jupyter_code>#@markdown Visualizing model predictions on noisy inputs: # Fetch some data x, y = next(iter(train_dataloader)) x = x[:8] # Only using the first 8 for easy plotting # Corrupt with a range of amounts amount = torch.linspace(0, 1, x.shape[0]) # Left to right -> more corruption noised_x = corrupt(x, amount) # Get the model predictions with torch.no_grad(): preds = net(noised_x.to(device)).detach().cpu() # Plot fig, axs = plt.subplots(3, 1, figsize=(12, 7)) axs[0].set_title('Input data') axs[0].imshow(torchvision.utils.make_grid(x)[0].clip(0, 1), cmap='Greys') axs[1].set_title('Corrupted data') axs[1].imshow(torchvision.utils.make_grid(noised_x)[0].clip(0, 1), cmap='Greys') axs[2].set_title('Network Predictions') axs[2].imshow(torchvision.utils.make_grid(preds)[0].clip(0, 1), cmap='Greys');<jupyter_output><empty_output><jupyter_text>You can see that for the lower amounts the predictions are pretty good! But as the level gets very high there is less for the model to work with, and by the time we get to amount=1 it outputs a blurry mess close to the mean of the dataset to try and hedge its bets on what the output might look like... SamplingIf our predictions at high noise levels aren't very good, how do we generate images?Well, what if we start from random noise, look at the model predictions but then only move a small amount towards that prediction - say, 20% of the way there. Now we have a very noisy image in which there is perhaps a hint of structure, which we can feed into the model to get a new prediction. The hope is that this new prediction is slightly better than the first one (since our starting point is slightly less noisy) and so we can take another small step with this new, better prediction.Repeat a few times and (if all goes well) we get an image out! Here is that process illustrated over just 5 steps, visualizing the model input (left) and the predicted denoised images (right) at each stage. Note that even though the model predicts the denoised image even at step 1, we only move x part of the way there.
Over a few steps the structures appear and are refined, until we get our final outputs.<jupyter_code>#@markdown Sampling strategy: Break the process into 5 steps and move 1/5'th of the way there each time: n_steps = 5 x = torch.rand(8, 1, 28, 28).to(device) # Start from random step_history = [x.detach().cpu()] pred_output_history = [] for i in range(n_steps): with torch.no_grad(): # No need to track gradients during inference pred = net(x) # Predict the denoised x0 pred_output_history.append(pred.detach().cpu()) # Store model output for plotting mix_factor = 1/(n_steps - i) # How much we move towards the prediction x = x*(1-mix_factor) + pred*mix_factor # Move part of the way there step_history.append(x.detach().cpu()) # Store step for plotting fig, axs = plt.subplots(n_steps, 2, figsize=(9, 4), sharex=True) axs[0,0].set_title('x (model input)') axs[0,1].set_title('model prediction') for i in range(n_steps): axs[i, 0].imshow(torchvision.utils.make_grid(step_history[i])[0].clip(0, 1), cmap='Greys') axs[i, 1].imshow(torchvision.utils.make_grid(pred_output_history[i])[0].clip(0, 1), cmap='Greys')<jupyter_output><empty_output><jupyter_text>We can split the process up into more steps, and hope for better images that way:<jupyter_code>#@markdown Showing more results, using 40 sampling steps n_steps = 40 x = torch.rand(64, 1, 28, 28).to(device) for i in range(n_steps): noise_amount = torch.ones((x.shape[0], )).to(device) * (1-(i/n_steps)) # Starting high going low with torch.no_grad(): pred = net(x) mix_factor = 1/(n_steps - i) x = x*(1-mix_factor) + pred*mix_factor fig, ax = plt.subplots(1, 1, figsize=(12, 12)) ax.imshow(torchvision.utils.make_grid(x.detach().cpu(), nrow=8)[0].clip(0, 1), cmap='Greys')<jupyter_output><empty_output><jupyter_text>Not great, but there are some recognizable digits there! You can experiment with training for longer (say, 10 or 20 epochs) and tweaking model config, learning rate, optimizer and so on. Also, don't forget that FashionMNIST is a one-line replacement if you want a slightly harder dataset to try. Comparison To DDPMIn this section we'll take a look at how our toy implementation differs from the approach used in the other notebook ([Introduction to Diffusers](https://github.com/huggingface/diffusion-models-class/blob/main/unit1/01_introduction_to_diffusers.ipynb)), which is based on the DDPM paper.We'll see that* The diffusers `UNet2DModel` is a bit more advanced than our BasicUNet* The corruption process is handled differently* The training objective is different, involving predicting the noise rather than the denoised image (a minimal sketch of this idea follows this overview)* The model is conditioned on the amount of noise present via timestep conditioning, where t is passed as an additional argument to the forward method.* There are a number of different sampling strategies available, which should work better than our simplistic version above.There have been a number of improvements suggested since the DDPM paper came out, but this example is hopefully instructive as to the different available design decisions. Once you've read through this, you may enjoy diving into the paper ['Elucidating the Design Space of Diffusion-Based Generative Models'](https://arxiv.org/abs/2206.00364) which explores all of these components in some detail and makes new recommendations for how to get the best performance. If all of this is too technical or intimidating, don't worry! Feel free to skip the rest of this notebook or save it for a rainy day.
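<jupyter_text>To make the training-objective difference concrete before we walk through each component, here is a minimal sketch (an added illustration, not a cell from the original notebook) of the same toy training loop with the target swapped: the network is asked to predict the noise that was mixed in rather than the clean image. It reuses `BasicUNet`, `dataset` and `device` from the cells above and repeats the `corrupt`-style mixing inline so that the noise tensor is available as a target. At sampling time you would then recover a clean-image estimate as `(noisy_x - amount * predicted_noise) / (1 - amount)`, which breaks down as `amount` approaches 1 - one reason the DDPM formulation handles this more carefully.<jupyter_code># Sketch only: DDPM-style objective (predict the noise) applied to our toy corruption
noise_pred_net = BasicUNet().to(device)
opt = torch.optim.Adam(noise_pred_net.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()
train_dataloader = DataLoader(dataset, batch_size=128, shuffle=True)

for x, y in train_dataloader:
    x = x.to(device)
    noise = torch.rand_like(x)  # keep the noise around so we can use it as the target
    amount = torch.rand(x.shape[0], device=device).view(-1, 1, 1, 1)
    noisy_x = x * (1 - amount) + noise * amount  # same mixing as corrupt()
    pred = noise_pred_net(noisy_x)
    loss = loss_fn(pred, noise)  # <<< the target is the noise, not the clean image
    opt.zero_grad()
    loss.backward()
    opt.step()<jupyter_output><empty_output>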
The UNetThe diffusers UNet2DModel model has a number of improvements over our basic UNet above:* GroupNorm applies group normalization to the inputs of each block* Dropout layers for smoother training* Multiple resnet layers per block (if layers_per_block isn't set to 1)* Attention (usually used only at lower resolution blocks)* Conditioning on the timestep. * Downsampling and upsampling blocks with learnable parametersLet's create and inspect a UNet2DModel:<jupyter_code>model = UNet2DModel( sample_size=28, # the target image resolution in_channels=1, # the number of input channels, 3 for RGB images out_channels=1, # the number of output channels layers_per_block=2, # how many ResNet layers to use per UNet block block_out_channels=(32, 64, 64), # Roughly matching our basic unet example down_block_types=( "DownBlock2D", # a regular ResNet downsampling block "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention "AttnDownBlock2D", ), up_block_types=( "AttnUpBlock2D", "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention "UpBlock2D", # a regular ResNet upsampling block ), ) print(model)<jupyter_output>UNet2DModel( (conv_in): Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (time_proj): Timesteps() (time_embedding): TimestepEmbedding( (linear_1): Linear(in_features=32, out_features=128, bias=True) (act): SiLU() (linear_2): Linear(in_features=128, out_features=128, bias=True) ) (down_blocks): ModuleList( (0): DownBlock2D( (resnets): ModuleList( (0): ResnetBlock2D( (norm1): GroupNorm(32, 32, eps=1e-05, affine=True) (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (time_emb_proj): Linear(in_features=128, out_features=32, bias=True) (norm2): GroupNorm(32, 32, eps=1e-05, affine=True) (dropout): Dropout(p=0.0, inplace=False) (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (nonlinearity): SiLU() ) (1): ResnetBlock2D( (norm1): GroupNorm(32, 32, eps=1e-05, affine=True) (conv1): Con[...]<jupyter_text>As you can see, a little more going on! It also has significantly more parameters than our BasicUNet:<jupyter_code>sum([p.numel() for p in model.parameters()]) # 1.7M vs the ~309k parameters of the BasicUNet<jupyter_output><empty_output><jupyter_text>We can replicate the training shown above using this model in place of our original one. We need to pass both x and timestep to the model (here I always pass t=0 to show that it works without this timestep conditioning and to keep the sampling code easy, but you can also try feeding in `(amount*1000)` to get a timestep equivalent from the corruption amount). Lines changed are shown with `<<<` if you want to inspect the code.<jupyter_code>#@markdown Trying UNet2DModel instead of BasicUNet: # Dataloader (you can mess with batch size) batch_size = 128 train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True) # How many runs through the data should we do? 
n_epochs = 3 # Create the network net = UNet2DModel( sample_size=28, # the target image resolution in_channels=1, # the number of input channels, 3 for RGB images out_channels=1, # the number of output channels layers_per_block=2, # how many ResNet layers to use per UNet block block_out_channels=(32, 64, 64), # Roughly matching our basic unet example down_block_types=( "DownBlock2D", # a regular ResNet downsampling block "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention "AttnDownBlock2D", ), up_block_types=( "AttnUpBlock2D", "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention "UpBlock2D", # a regular ResNet upsampling block ), ) #<<< net.to(device) # Our loss function loss_fn = nn.MSELoss() # The optimizer opt = torch.optim.Adam(net.parameters(), lr=1e-3) # Keeping a record of the losses for later viewing losses = [] # The training loop for epoch in range(n_epochs): for x, y in train_dataloader: # Get some data and prepare the corrupted version x = x.to(device) # Data on the GPU noise_amount = torch.rand(x.shape[0]).to(device) # Pick random noise amounts noisy_x = corrupt(x, noise_amount) # Create our noisy x # Get the model prediction pred = net(noisy_x, 0).sample #<<< Using timestep 0 always, adding .sample # Calculate the loss loss = loss_fn(pred, x) # How close is the output to the true 'clean' x? # Backprop and update the params: opt.zero_grad() loss.backward() opt.step() # Store the loss for later losses.append(loss.item()) # Print out the average of the loss values for this epoch: avg_loss = sum(losses[-len(train_dataloader):])/len(train_dataloader) print(f'Finished epoch {epoch}. Average loss for this epoch: {avg_loss:05f}') # Plot losses and some samples fig, axs = plt.subplots(1, 2, figsize=(12, 5)) # Losses axs[0].plot(losses) axs[0].set_ylim(0, 0.1) axs[0].set_title('Loss over time') # Samples n_steps = 40 x = torch.rand(64, 1, 28, 28).to(device) for i in range(n_steps): noise_amount = torch.ones((x.shape[0], )).to(device) * (1-(i/n_steps)) # Starting high going low with torch.no_grad(): pred = net(x, 0).sample mix_factor = 1/(n_steps - i) x = x*(1-mix_factor) + pred*mix_factor axs[1].imshow(torchvision.utils.make_grid(x.detach().cpu(), nrow=8)[0].clip(0, 1), cmap='Greys') axs[1].set_title('Generated Samples');<jupyter_output>Finished epoch 0. Average loss for this epoch: 0.018925 Finished epoch 1. Average loss for this epoch: 0.012785 Finished epoch 2. Average loss for this epoch: 0.011694<jupyter_text>This looks quite a bit better than our first set of results! You can explore tweaking the unet configuration or training for longer to get even better performance. The Corruption ProcessThe DDPM paper describes a corruption process that adds a small amount of noise for every 'timestep'. Given $x_{t-1}$ for some timestep, we can get the next (slightly more noisy) version $x_t$ with:$q(\mathbf{x}_t \vert \mathbf{x}_{t-1}) = \mathcal{N}(\mathbf{x}_t; \sqrt{1 - \beta_t} \mathbf{x}_{t-1}, \beta_t\mathbf{I}) \quad q(\mathbf{x}_{1:T} \vert \mathbf{x}_0) = \prod^T_{t=1} q(\mathbf{x}_t \vert \mathbf{x}_{t-1})$That is, we take $x_{t-1}$, scale it by $\sqrt{1 - \beta_t}$ and add noise with variance $\beta_t$. This $\beta$ is defined for every t according to some schedule, and determines how much noise is added per timestep.
Now, we don't necessarily want to do this operation 500 times to get $x_{500}$ so we have another formula to get $x_t$ for any t given $x_0$: $\begin{aligned}q(\mathbf{x}_t \vert \mathbf{x}_0) &= \mathcal{N}(\mathbf{x}_t; \sqrt{\bar{\alpha}_t} \mathbf{x}_0, (1 - \bar{\alpha}_t) \mathbf{I})\end{aligned}$ where $\bar{\alpha}_t = \prod_{i=1}^t \alpha_i$ and $\alpha_i = 1-\beta_i$The maths notation always looks scary! Luckily the scheduler handles all that for us (uncomment the next cell to check out the code). We can plot $\sqrt{\bar{\alpha}_t}$ (labelled as `sqrt_alpha_prod`) and $\sqrt{(1 - \bar{\alpha}_t)}$ (labelled as `sqrt_one_minus_alpha_prod`) to view how the input (x) and the noise are scaled and mixed across different timesteps:<jupyter_code>#??noise_scheduler.add_noise noise_scheduler = DDPMScheduler(num_train_timesteps=1000) plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r"${\sqrt{\bar{\alpha}_t}}$") plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 - \bar{\alpha}_t)}$") plt.legend(fontsize="x-large");<jupyter_output><empty_output><jupyter_text>Initially, the noisy x is mostly x (sqrt_alpha_prod ~= 1) but over time the contribution of x drops and the noise component increases. Unlike our linear mix of x and noise according to `amount`, this one gets noisy relatively quickly. We can visualize this on some data:<jupyter_code>#@markdown visualize the DDPM noising process for different timesteps: # Noise a batch of images to view the effect fig, axs = plt.subplots(3, 1, figsize=(16, 10)) xb, yb = next(iter(train_dataloader)) xb = xb.to(device)[:8] xb = xb * 2. - 1. # Map to (-1, 1) print('X shape', xb.shape) # Show clean inputs axs[0].imshow(torchvision.utils.make_grid(xb[:8])[0].detach().cpu(), cmap='Greys') axs[0].set_title('Clean X') # Add noise with scheduler timesteps = torch.linspace(0, 999, 8).long().to(device) noise = torch.randn_like(xb) # << NB: randn not rand noisy_xb = noise_scheduler.add_noise(xb, noise, timesteps) print('Noisy X shape', noisy_xb.shape) # Show noisy version (with and without clipping) axs[1].imshow(torchvision.utils.make_grid(noisy_xb[:8])[0].detach().cpu().clip(-1, 1), cmap='Greys') axs[1].set_title('Noisy X (clipped to (-1, 1))') axs[2].imshow(torchvision.utils.make_grid(noisy_xb[:8])[0].detach().cpu(), cmap='Greys') axs[2].set_title('Noisy X');<jupyter_output>X shape torch.Size([8, 1, 28, 28]) Noisy X shape torch.Size([8, 1, 28, 28])
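<jupyter_text>As a quick sanity check (another added cell, not part of the original notebook), `add_noise` above is just the closed-form formula we plotted: $x_t = \sqrt{\bar{\alpha}_t} x_0 + \sqrt{1 - \bar{\alpha}_t} \epsilon$. Re-computing it by hand from `alphas_cumprod` should match `noisy_xb` up to floating-point error:<jupyter_code># Manual version of the closed-form corruption, reusing xb, noise and timesteps from the previous cell
alpha_bar = noise_scheduler.alphas_cumprod.to(device)[timesteps]  # one value per image in the batch
manual_noisy_xb = (alpha_bar.sqrt().view(-1, 1, 1, 1) * xb
                   + (1 - alpha_bar).sqrt().view(-1, 1, 1, 1) * noise)
print(torch.allclose(manual_noisy_xb, noisy_xb, atol=1e-4))  # should print True<jupyter_output><empty_output>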
diffusion-models-class/units/en/unit1/diffusion_models_from_scratch.ipynb/0
{ "file_path": "diffusion-models-class/units/en/unit1/diffusion_models_from_scratch.ipynb", "repo_id": "diffusion-models-class", "token_count": 8452 }
138
<jupyter_start><jupyter_text>Que peuvent faire les *transformers* ? Installez la bibliothèque 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install transformers[sentencepiece] from transformers import pipeline<jupyter_output><empty_output><jupyter_text>Analyse de sentiments<jupyter_code>classifier = pipeline("sentiment-analysis") classifier("I've been waiting for a HuggingFace course my whole life.") classifier = pipeline("sentiment-analysis", model="tblard/tf-allocine") classifier("J'ai attendu un cours d'HuggingFace toute ma vie.")<jupyter_output><empty_output><jupyter_text>Intéressant ! On observe que le résultat est négatif là où pour la version en anglais le résultat est positif.<jupyter_code>classifier( ["J'ai attendu un cours d'HuggingFace toute ma vie.", "Je déteste tellement ça !"] ) # pour classifier plusieurs phrases<jupyter_output><empty_output><jupyter_text>La phrase "J'ai attendu un cours d'HuggingFace toute ma vie." qui était précedemment négative devient à présent positive. Zéro shot classification<jupyter_code>classifier = pipeline("zero-shot-classification", model="BaptisteDoyen/camembert-base-xnli") classifier( "C'est un cours sur la bibliothèque Transformers", candidate_labels=["éducation", "politique", "affaires"], )<jupyter_output><empty_output><jupyter_text>Génération de texte<jupyter_code>generator = pipeline("text-generation", model="asi/gpt-fr-cased-small") generator("# Dans ce cours, nous vous enseignerons comment") generator = pipeline("text-generation", model="asi/gpt-fr-cased-small") generator( "# Dans ce cours, nous vous enseignerons comment", max_length=30, num_return_sequences=1, )<jupyter_output><empty_output><jupyter_text>Remplacement des mots masqués<jupyter_code>unmasker = pipeline("fill-mask", model="camembert-base") unmasker(" Ce cours vous apprendra tout sur les modèles <mask>.", top_k=2)<jupyter_output><empty_output><jupyter_text>Reconnaissance d'entités nommées<jupyter_code>ner = pipeline("ner", model="Jean-Baptiste/camembert-ner", grouped_entities=True) ner("Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.")<jupyter_output><empty_output><jupyter_text>Réponse à des questions<jupyter_code>question_answerer = pipeline("question-answering", model="etalab-ia/camembert-base-squadFR-fquad-piaf") question_answerer( question="Où est-ce que je travaille ?", context="Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.", )<jupyter_output><empty_output><jupyter_text>Résumé<jupyter_code>summarizer = pipeline("summarization", model="moussaKam/barthez-orangesum-abstract") summarizer( """ L'Amérique a changé de façon spectaculaire au cours des dernières années. Non seulement le nombre de diplômés dans les disciplines traditionnelles de l'ingénierie telles que le génie mécanique, civil, l'électricité, la chimie et l'aéronautique a diminué, mais dans la plupart des grandes universités américaines, les programmes d'études d'ingénierie se concentrent désormais sur et encouragent largement l'étude des sciences de l'ingénieur. Par conséquent, il y a de moins en moins d'offres dans les sujets d'ingénierie traitant de l'infrastructure, l'environnement et les questions connexes, et une plus grande concentration sur les sujets de haute technologie, qui soutiennent en grande partie des développements scientifiques de plus en plus complexes. Si cette dernière est importante, elle ne doit pas se faire au détriment de l'ingénierie plus traditionnelle. 
Les économies en développement rapide telles que la Chine et l'Inde, ainsi que d'autres pays industrialisés d'Europe et d'Asie, continuent d'encourager et de promouvoir l'enseignement de l'ingénierie. La Chine et l'Inde, respectivement, diplôment six et huit fois plus d'ingénieurs traditionnels que les États-Unis. Les autres pays industriels maintiennent au minimum leur production, tandis que l'Amérique souffre d'une baisse de plus en plus importante du nombre de diplômés en ingénierie et un manque d'ingénieurs bien formés. """ )<jupyter_output><empty_output><jupyter_text>Traduction<jupyter_code>translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr") translator("This course is produced by Hugging Face.")<jupyter_output><empty_output>
notebooks/course/fr/chapter1/section3.ipynb/0
{ "file_path": "notebooks/course/fr/chapter1/section3.ipynb", "repo_id": "notebooks", "token_count": 1607 }
139
<jupyter_start><jupyter_text>Finetuner un modèle avec l'API Trainer Installez les bibliothèques 🤗 Transformers et 🤗 Datasets pour exécuter ce notebook.<jupyter_code>!pip install datasets transformers[sentencepiece] from datasets import load_dataset from transformers import AutoTokenizer, DataCollatorWithPadding raw_datasets = load_dataset("paws-x", "fr") checkpoint = "camembert-base" tokenizer = AutoTokenizer.from_pretrained(checkpoint) def tokenize_function(example): return tokenizer(example["sentence1"], example["sentence2"], truncation=True) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) from transformers import TrainingArguments training_args = TrainingArguments("test-trainer") from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) from transformers import Trainer trainer = Trainer( model, training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, ) trainer.train() # Attention, une epoch prend 12h ! predictions = trainer.predict(tokenized_datasets["validation"]) print(predictions.predictions.shape, predictions.label_ids.shape) import numpy as np preds = np.argmax(predictions.predictions, axis=-1) from datasets import load_metric metric = load_metric("glue", "mrpc") metric.compute(predictions=preds, references=predictions.label_ids) def compute_metrics(eval_preds): metric = load_metric("glue", "mrpc") logits, labels = eval_preds predictions = np.argmax(logits, axis=-1) return metric.compute(predictions=predictions, references=labels) training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch") model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) trainer = Trainer( model, training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics, )<jupyter_output><empty_output>
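A short usage sketch in addition to the notebook's cells (not part of the original): with `compute_metrics` wired into this second `Trainer`, calling `train()` reports the metrics at the end of every epoch, and `evaluate()` runs a standalone evaluation pass (keep in mind the earlier warning that one epoch is very slow on this dataset):

```python
# Hypothetical follow-up cell: train with per-epoch evaluation, then evaluate once more
trainer.train()
metrics = trainer.evaluate()
print(metrics)  # expected keys include eval_loss plus the accuracy/F1 from compute_metrics
```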
notebooks/course/fr/chapter3/section3.ipynb/0
{ "file_path": "notebooks/course/fr/chapter3/section3.ipynb", "repo_id": "notebooks", "token_count": 754 }
140
<jupyter_start><jupyter_text>Entraîner un modèle de langage causal de zéro (PyTorch)Ici nous entraînons un modèle à générer du code Python. Le Python utilisant des fonctions basées sur des mots anglais, nous gardons un gpt-2 anglais dans l'optique d'obtenir de meilleures performances que ce que l'on pourrait s'attendre en utilisant un gpt-2 en français. Installez les bibliothèques 🤗 *Datasets* et 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install accelerate # Pour exécuter l'entraînement sur TPU, vous devez décommenter la ligne suivante : # !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() def any_keyword_in_string(string, keywords): for keyword in keywords: if keyword in string: return True return False filters = ["pandas", "sklearn", "matplotlib", "seaborn"] example_1 = "import numpy as np" example_2 = "import pandas as pd" print( any_keyword_in_string(example_1, filters), any_keyword_in_string(example_2, filters) ) from collections import defaultdict from tqdm import tqdm from datasets import Dataset def filter_streaming_dataset(dataset, filters): filtered_dict = defaultdict(list) total = 0 for sample in tqdm(iter(dataset)): total += 1 if any_keyword_in_string(sample["content"], filters): for k, v in sample.items(): filtered_dict[k].append(v) print(f"{len(filtered_dict['content'])/total:.2%} of data after filtering.") return Dataset.from_dict(filtered_dict) # Cette cellule prendra beaucoup de temps à s'exécuter, donc vous devriez la sauter et aller à la suivante ! 
from datasets import load_dataset split = "train" # "valid" filters = ["pandas", "sklearn", "matplotlib", "seaborn"] data = load_dataset(f"transformersbook/codeparrot-{split}", split=split, streaming=True) filtered_data = filter_streaming_dataset(data, filters) from datasets import load_dataset, DatasetDict ds_train = load_dataset("huggingface-course/codeparrot-ds-train", split="train") ds_valid = load_dataset("huggingface-course/codeparrot-ds-valid", split="validation") raw_datasets = DatasetDict( { "train": ds_train, # .shuffle().select(range(50000)), "valid": ds_valid, # .shuffle().select(range(500)) } ) raw_datasets for key in raw_datasets["train"][0]: print(f"{key.upper()}: {raw_datasets['train'][0][key][:200]}") from transformers import AutoTokenizer context_length = 128 tokenizer = AutoTokenizer.from_pretrained("huggingface-course/code-search-net-tokenizer") outputs = tokenizer( raw_datasets["train"][:2]["content"], truncation=True, max_length=context_length, return_overflowing_tokens=True, return_length=True, ) print(f"Input IDs length: {len(outputs['input_ids'])}") print(f"Input chunk lengths: {(outputs['length'])}") print(f"Chunk mapping: {outputs['overflow_to_sample_mapping']}") def tokenize(element): outputs = tokenizer( element["content"], truncation=True, max_length=context_length, return_overflowing_tokens=True, return_length=True, ) input_batch = [] for length, input_ids in zip(outputs["length"], outputs["input_ids"]): if length == context_length: input_batch.append(input_ids) return {"input_ids": input_batch} tokenized_datasets = raw_datasets.map( tokenize, batched=True, remove_columns=raw_datasets["train"].column_names ) tokenized_datasets from transformers import AutoTokenizer, GPT2LMHeadModel, AutoConfig config = AutoConfig.from_pretrained( "gpt2", vocab_size=len(tokenizer), n_ctx=context_length, bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id, ) model = GPT2LMHeadModel(config) model_size = sum(t.numel() for t in model.parameters()) print(f"GPT-2 size: {model_size/1000**2:.1f}M parameters") from transformers import DataCollatorForLanguageModeling tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) out = data_collator([tokenized_datasets["train"][i] for i in range(5)]) for key in out: print(f"{key} shape: {out[key].shape}") from huggingface_hub import notebook_login notebook_login() from transformers import Trainer, TrainingArguments args = TrainingArguments( output_dir="codeparrot-ds", per_device_train_batch_size=32, per_device_eval_batch_size=32, evaluation_strategy="steps", eval_steps=5_000, logging_steps=5_000, gradient_accumulation_steps=8, num_train_epochs=1, weight_decay=0.1, warmup_steps=1_000, lr_scheduler_type="cosine", learning_rate=5e-4, save_steps=5_000, fp16=True, push_to_hub=True, ) trainer = Trainer( model=model, tokenizer=tokenizer, args=args, data_collator=data_collator, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], ) trainer.train() trainer.push_to_hub() import torch from transformers import pipeline device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") pipe = pipeline( "text-generation", model="huggingface-course/codeparrot-ds", device=device ) txt = """\ # create some data x = np.random.randn(100) y = np.random.randn(100) # create scatter plot with x, y """ print(pipe(txt, num_return_sequences=1)[0]["generated_text"]) txt = """\ # create some data x = np.random.randn(100) y = np.random.randn(100) # 
create dataframe from x and y """ print(pipe(txt, num_return_sequences=1)[0]["generated_text"]) txt = """\ # dataframe with profession, income and name df = pd.DataFrame({'profession': x, 'income':y, 'name': z}) # calculate the mean income per profession """ print(pipe(txt, num_return_sequences=1)[0]["generated_text"]) txt = """ # import random forest regressor from scikit-learn from sklearn.ensemble import RandomForestRegressor # fit random forest model with 300 estimators on X, y: """ print(pipe(txt, num_return_sequences=1)[0]["generated_text"]) keytoken_ids = [] for keyword in [ "plt", "pd", "sk", "fit", "predict", " plt", " pd", " sk", " fit", " predict", "testtest", ]: ids = tokenizer([keyword]).input_ids[0] if len(ids) == 1: keytoken_ids.append(ids[0]) else: print(f"Keyword has not single token: {keyword}") from torch.nn import CrossEntropyLoss import torch def keytoken_weighted_loss(inputs, logits, keytoken_ids, alpha=1.0): # Décalage pour que tokens < n prédise n shift_labels = inputs[..., 1:].contiguous() shift_logits = logits[..., :-1, :].contiguous() # Calculer la perte par token loss_fct = CrossEntropyLoss(reduce=False) loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) # Redimensionnement et perte moyenne par échantillon loss_per_sample = loss.view(shift_logits.size(0), shift_logits.size(1)).mean(axis=1) # Calculer et échelonner la pondération weights = torch.stack([(inputs == kt).float() for kt in keytoken_ids]).sum( axis=[0, 2] ) weights = alpha * (1.0 + weights) # Calculer la moyenne pondérée weighted_loss = (loss_per_sample * weights).mean() return weighted_loss from torch.utils.data.dataloader import DataLoader tokenized_dataset.set_format("torch") train_dataloader = DataLoader(tokenized_dataset["train"], batch_size=32, shuffle=True) eval_dataloader = DataLoader(tokenized_dataset["valid"], batch_size=32) weight_decay = 0.1 def get_grouped_params(model, no_decay=["bias", "LayerNorm.weight"]): params_with_wd, params_without_wd = [], [] for n, p in model.named_parameters(): if any(nd in n for nd in no_decay): params_without_wd.append(p) else: params_with_wd.append(p) return [ {"params": params_with_wd, "weight_decay": weight_decay}, {"params": params_without_wd, "weight_decay": 0.0}, ] def evaluate(): model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(batch["input_ids"], labels=batch["input_ids"]) losses.append(accelerator.gather(outputs.loss)) loss = torch.mean(torch.cat(losses)) try: perplexity = torch.exp(loss) except OverflowError: perplexity = float("inf") return loss.item(), perplexity.item() model = GPT2LMHeadModel(config) from torch.optim import AdamW optimizer = AdamW(get_grouped_params(model), lr=5e-4) from accelerate import Accelerator accelerator = Accelerator(fp16=True) model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) from transformers import get_scheduler num_train_epochs = 1 num_update_steps_per_epoch = len(train_dataloader) num_training_steps = num_train_epochs * num_update_steps_per_epoch lr_scheduler = get_scheduler( name="linear", optimizer=optimizer, num_warmup_steps=1_000, num_training_steps=num_training_steps, ) from huggingface_hub import Repository, get_full_repo_name model_name = "codeparrot-ds-accelerate" repo_name = get_full_repo_name(model_name) repo_name output_dir = "codeparrot-ds-accelerate" repo = Repository(output_dir, clone_from=repo_name) evaluate() from tqdm.notebook 
import tqdm gradient_accumulation_steps = 8 eval_steps = 5_000 model.train() completed_steps = 0 for epoch in range(num_train_epochs): for step, batch in tqdm( enumerate(train_dataloader, start=1), total=num_training_steps ): logits = model(batch["input_ids"]).logits loss = keytoken_weighted_loss(batch["input_ids"], logits, keytoken_ids) if step % 100 == 0: accelerator.print( { "lr": get_lr(), "samples": step * samples_per_step, "steps": completed_steps, "loss/train": loss.item() * gradient_accumulation_steps, } ) loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: accelerator.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() lr_scheduler.step() optimizer.zero_grad() completed_steps += 1 if (step % (eval_steps * gradient_accumulation_steps)) == 0: eval_loss, perplexity = evaluate() accelerator.print({"loss/eval": eval_loss, "perplexity": perplexity}) model.train() accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save) if accelerator.is_main_process: tokenizer.save_pretrained(output_dir) repo.push_to_hub( commit_message=f"Training in progress step {step}", blocking=False )<jupyter_output><empty_output>
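Two caveats about the Accelerate loop above, plus a hedged sketch. First, the dataloader cells read from `tokenized_dataset` while the earlier cells build `tokenized_datasets`, so the two names need to be reconciled before running. Second, the loop calls `get_lr()` and uses `samples_per_step`, neither of which is defined anywhere in this excerpt; the definitions below are plausible reconstructions, not the original course code:

```python
# Hypothetical helpers assumed by the training loop above
samples_per_step = accelerator.state.num_processes * 32  # 32 = per-device batch size used above

def get_lr():
    # current learning rate of the optimizer prepared by Accelerate
    return optimizer.param_groups[0]["lr"]
```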
notebooks/course/fr/chapter7/section6_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section6_pt.ipynb", "repo_id": "notebooks", "token_count": 4772 }
141
<jupyter_start><jupyter_text>Textual-inversion fine-tuning for Stable Diffusion using d🧨ffusers This notebook shows how to "teach" Stable Diffusion a new concept via textual-inversion using 🤗 Hugging Face [🧨 Diffusers library](https://github.com/huggingface/diffusers). _By using just 3-5 images you can teach new concepts to Stable Diffusion and personalize the model on your own images_ For a general introduction to the Stable Diffusion model please refer to this [colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb). Initial setup<jupyter_code>#@title Install the required libs !pip install -U -qq git+https://github.com/huggingface/diffusers.git !pip install -qq accelerate transformers ftfy #@title [Optional] Install xformers for faster and memory efficient training #@markdown Acknowledgement: The xformers wheel are taken from [TheLastBen/fast-stable-diffusion](https://github.com/TheLastBen/fast-stable-diffusion). Thanks a lot for building these wheels! %%time !pip install -U --pre triton from subprocess import getoutput from IPython.display import HTML from IPython.display import clear_output import time s = getoutput('nvidia-smi') if 'T4' in s: gpu = 'T4' elif 'P100' in s: gpu = 'P100' elif 'V100' in s: gpu = 'V100' elif 'A100' in s: gpu = 'A100' while True: try: gpu=='T4'or gpu=='P100'or gpu=='V100'or gpu=='A100' break except: pass print('[1;31mit seems that your GPU is not supported at the moment') time.sleep(5) if (gpu=='T4'): %pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/T4/xformers-0.0.13.dev0-py3-none-any.whl elif (gpu=='P100'): %pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/P100/xformers-0.0.13.dev0-py3-none-any.whl elif (gpu=='V100'): %pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/V100/xformers-0.0.13.dev0-py3-none-any.whl elif (gpu=='A100'): %pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/A100/xformers-0.0.13.dev0-py3-none-any.whl #@title [Optional] Login to the Hugging Face Hub #@markdown Add a token with the "Write Access" role to be able to add your trained concept to the [Library of Concepts](https://huggingface.co/sd-concepts-library) from huggingface_hub import notebook_login notebook_login() #@title Import required libraries import argparse import itertools import math import os import random import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch.utils.data import Dataset import PIL from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from PIL import Image from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer def image_grid(imgs, rows, cols): assert len(imgs) == rows*cols w, h = imgs[0].size grid = Image.new('RGB', size=(cols*w, rows*h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h)) return grid<jupyter_output><empty_output><jupyter_text>Settings for teaching your new concept<jupyter_code>#@markdown `pretrained_model_name_or_path` which Stable Diffusion 
checkpoint you want to use pretrained_model_name_or_path = "stabilityai/stable-diffusion-2" #@param ["stabilityai/stable-diffusion-2", "stabilityai/stable-diffusion-2-base", "CompVis/stable-diffusion-v1-4", "runwayml/stable-diffusion-v1-5"] {allow-input: true}<jupyter_output><empty_output><jupyter_text>Get the training images: Download the images from the internet and save them locally.You can also upload the images to colab or load from google drive, please check the next section if you want to use that.<jupyter_code>#@markdown Add here the URLs to the images of the concept you are adding. 3-5 should be fine urls = [ "https://huggingface.co/datasets/valhalla/images/resolve/main/2.jpeg", "https://huggingface.co/datasets/valhalla/images/resolve/main/3.jpeg", "https://huggingface.co/datasets/valhalla/images/resolve/main/5.jpeg", "https://huggingface.co/datasets/valhalla/images/resolve/main/6.jpeg", ## You can add additional images here ] #@title Download import requests import glob from io import BytesIO def download_image(url): try: response = requests.get(url) except: return None return Image.open(BytesIO(response.content)).convert("RGB") images = list(filter(None,[download_image(url) for url in urls])) save_path = "./my_concept" if not os.path.exists(save_path): os.mkdir(save_path) [image.save(f"{save_path}/{i}.jpeg") for i, image in enumerate(images)]<jupyter_output><empty_output><jupyter_text>Load images from local folder or google driveYou can also load your own training images from google drive or upload them to colab usingthe files taband then provide the path to the directory containing images. *Make sure that the directory only contains images as the following cells will read all the files from the provided directory.*<jupyter_code>from google.colab import drive drive.mount('/content/gdrive') #@markdown `images_path` is a path to directory containing the training images. It could images_path = "" #@param {type:"string"} while not os.path.exists(str(images_path)): print('The images_path specified does not exist, use the colab file explorer to copy the path :') images_path=input("") save_path = images_path<jupyter_output>The images_path specified does not exist, use the colab file explorer to copy the path : ./cat_toy<jupyter_text>Setup and check the images you have just added<jupyter_code>images = [] for file_path in os.listdir(save_path): try: image_path = os.path.join(save_path, file_path) images.append(Image.open(image_path).resize((512, 512))) except: print(f"{image_path} is not a valid image, please make sure to remove this file from the directory otherwise the training could fail.") image_grid(images, 1, len(images)) #@title Settings for your newly created concept #@markdown `what_to_teach`: what is it that you are teaching? `object` enables you to teach the model a new object to be used, `style` allows you to teach the model a new style one can use. what_to_teach = "object" #@param ["object", "style"] #@markdown `placeholder_token` is the token you are going to use to represent your new concept (so when you prompt the model, you will say "A `<my-placeholder-token>` in an amusement park"). We use angle brackets to differentiate a token from other words/tokens, to avoid collision. 
placeholder_token = "<cat-toy>" #@param {type:"string"} #@markdown `initializer_token` is a word that can summarise what your new concept is, to be used as a starting point initializer_token = "toy" #@param {type:"string"}<jupyter_output><empty_output><jupyter_text>Teach the model a new concept (fine-tuning with textual inversion)Execute this this sequence of cells to run the training process. The whole process may take from 1-4 hours. (Open this block if you are interested in how this process works under the hood or if you want to change advanced training settings or hyperparameters) Create Dataset<jupyter_code>#@title Setup the prompt templates for training imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] #@title Setup the dataset class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, 
padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) h, w, = ( img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example<jupyter_output><empty_output><jupyter_text>Setting up the model<jupyter_code>#@title Load the tokenizer and add the placeholder token as a additional special token. tokenizer = CLIPTokenizer.from_pretrained( pretrained_model_name_or_path, subfolder="tokenizer", ) # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." ) #@title Get token ids for our placeholder and initializer token. This code block will complain if initializer string is not a single token # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(placeholder_token) #@title Load the Stable Diffusion model # Load models and create wrapper for stable diffusion # pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path) # del pipeline text_encoder = CLIPTextModel.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder" ) vae = AutoencoderKL.from_pretrained( pretrained_model_name_or_path, subfolder="vae" ) unet = UNet2DConditionModel.from_pretrained( pretrained_model_name_or_path, subfolder="unet" )<jupyter_output><empty_output><jupyter_text>We have added the `placeholder_token` in the `tokenizer` so we resize the token embeddings here, this will a new embedding vector in the token embeddings for our `placeholder_token`<jupyter_code>text_encoder.resize_token_embeddings(len(tokenizer))<jupyter_output><empty_output><jupyter_text>Initialise the newly added placeholder token with the embeddings of the initializer token<jupyter_code>token_embeds = text_encoder.get_input_embeddings().weight.data token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]<jupyter_output><empty_output><jupyter_text>In Textual-Inversion we only train the newly added embedding vector, so lets freeze rest of the model parameters here<jupyter_code>def freeze_params(params): for param in params: param.requires_grad = False # Freeze vae and unet freeze_params(vae.parameters()) freeze_params(unet.parameters()) # Freeze all parameters except for the token embeddings in text encoder params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze)<jupyter_output><empty_output><jupyter_text>Creating our training data 
Let's create the Dataset and Dataloader<jupyter_code>train_dataset = TextualInversionDataset( data_root=save_path, tokenizer=tokenizer, size=vae.sample_size, placeholder_token=placeholder_token, repeats=100, learnable_property=what_to_teach, #Option selected above between object and style center_crop=False, set="train", ) def create_dataloader(train_batch_size=1): return torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)<jupyter_output><empty_output><jupyter_text>Create noise_scheduler for training<jupyter_code>noise_scheduler = DDPMScheduler.from_config(pretrained_model_name_or_path, subfolder="scheduler")<jupyter_output>/usr/local/lib/python3.7/dist-packages/diffusers/utils/deprecation_utils.py:35: FutureWarning: It is deprecated to pass a pretrained model name or path to `from_config`.If you were trying to load a scheduler, please use <class 'diffusers.schedulers.scheduling_ddpm.DDPMScheduler'>.from_pretrained(...) instead. Otherwise, please make sure to pass a configuration dictionary instead. This functionality will be removed in v1.0.0. warnings.warn(warning + message, FutureWarning)<jupyter_text>Training Define hyperparameters for our trainingIf you are not happy with your results, you can tune the `learning_rate` and the `max_train_steps`<jupyter_code>#@title Setting up all training args hyperparameters = { "learning_rate": 5e-04, "scale_lr": True, "max_train_steps": 2000, "save_steps": 250, "train_batch_size": 4, "gradient_accumulation_steps": 1, "gradient_checkpointing": True, "mixed_precision": "fp16", "seed": 42, "output_dir": "sd-concept-output" } !mkdir -p sd-concept-output<jupyter_output><empty_output><jupyter_text>Train!<jupyter_code>#@title Training function logger = get_logger(__name__) def save_progress(text_encoder, placeholder_token_id, accelerator, save_path): logger.info("Saving embeddings") learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id] learned_embeds_dict = {placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def training_function(text_encoder, vae, unet): train_batch_size = hyperparameters["train_batch_size"] gradient_accumulation_steps = hyperparameters["gradient_accumulation_steps"] learning_rate = hyperparameters["learning_rate"] max_train_steps = hyperparameters["max_train_steps"] output_dir = hyperparameters["output_dir"] gradient_checkpointing = hyperparameters["gradient_checkpointing"] accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=hyperparameters["mixed_precision"] ) if gradient_checkpointing: text_encoder.gradient_checkpointing_enable() unet.enable_gradient_checkpointing() train_dataloader = create_dataloader(train_batch_size) if hyperparameters["scale_lr"]: learning_rate = ( learning_rate * gradient_accumulation_steps * train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings lr=learning_rate, ) text_encoder, optimizer, train_dataloader = accelerator.prepare( text_encoder, optimizer, train_dataloader ) weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and unet to device vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) # Keep vae in eval mode as we don't train it 
vae.eval() # Keep unet in train mode to enable gradient checkpointing unet.train() # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / gradient_accumulation_steps) num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch) # Train! total_batch_size = train_batch_size * accelerator.num_processes * gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Instantaneous batch size per device = {train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") global_step = 0 for epoch in range(num_train_epochs): text_encoder.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(text_encoder): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device).long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states.to(weight_dtype)).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = F.mse_loss(noise_pred, target, reduction="none").mean([1, 2, 3]).mean() accelerator.backward(loss) # Zero out the gradients for all token embeddings except the newly added # embeddings for the concept, as we only want to optimize the concept embeddings if accelerator.num_processes > 1: grads = text_encoder.module.get_input_embeddings().weight.grad else: grads = text_encoder.get_input_embeddings().weight.grad # Get the index for tokens that we want to zero the grads for index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0) optimizer.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % hyperparameters["save_steps"] == 0: save_path = os.path.join(output_dir, f"learned_embeds-step-{global_step}.bin") save_progress(text_encoder, placeholder_token_id, accelerator, save_path) logs = {"loss": loss.detach().item()} progress_bar.set_postfix(**logs) if global_step >= max_train_steps: break 
accelerator.wait_for_everyone() # Create the pipeline using using the trained modules and save it. if accelerator.is_main_process: pipeline = StableDiffusionPipeline.from_pretrained( pretrained_model_name_or_path, text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, vae=vae, unet=unet, ) pipeline.save_pretrained(output_dir) # Also save the newly trained embeddings save_path = os.path.join(output_dir, f"learned_embeds.bin") save_progress(text_encoder, placeholder_token_id, accelerator, save_path) import accelerate accelerate.notebook_launcher(training_function, args=(text_encoder, vae, unet)) for param in itertools.chain(unet.parameters(), text_encoder.parameters()): if param.grad is not None: del param.grad # free some memory torch.cuda.empty_cache()<jupyter_output><empty_output><jupyter_text>Run the code with your newly trained modelIf you have just trained your model with the code above, use the block below to run itTo save this concept for re-using, download the `learned_embeds.bin` file or save it on the library of concepts.Use the [Stable Conceptualizer notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) for inference with persistently saved pre-trained concepts<jupyter_code>#@title Save your newly created concept to the [library of concepts](https://huggingface.co/sd-concepts-library)? save_concept_to_public_library = True #@param {type:"boolean"} name_of_your_concept = "Cat toy" #@param {type:"string"} #@markdown `hf_token_write`: leave blank if you logged in with a token with `write access` in the [Initial Setup](#scrollTo=KbzZ9xe6dWwf). If not, [go to your tokens settings and create a write access token](https://huggingface.co/settings/tokens) hf_token_write = "" #@param {type:"string"} if(save_concept_to_public_library): from slugify import slugify from huggingface_hub import HfApi, HfFolder, CommitOperationAdd from huggingface_hub import create_repo repo_id = f"sd-concepts-library/{slugify(name_of_your_concept)}" output_dir = hyperparameters["output_dir"] if(not hf_token_write): with open(HfFolder.path_token, 'r') as fin: hf_token = fin.read(); else: hf_token = hf_token_write #Join the Concepts Library organization if you aren't part of it already !curl -X POST -H 'Authorization: Bearer '$hf_token -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-concepts-library/share/VcLXJtzwwxnHYCkNMLpSJCdnNFZHQwWywv images_upload = os.listdir("my_concept") image_string = "" repo_id = f"sd-concepts-library/{slugify(name_of_your_concept)}" for i, image in enumerate(images_upload): image_string = f'''{image_string}![{placeholder_token} {i}](https://huggingface.co/{repo_id}/resolve/main/concept_images/{image}) ''' if(what_to_teach == "style"): what_to_teach_article = f"a `{what_to_teach}`" else: what_to_teach_article = f"an `{what_to_teach}`" readme_text = f'''--- license: mit base_model: {pretrained_model_name_or_path} --- ### {name_of_your_concept} on Stable Diffusion This is the `{placeholder_token}` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. 
You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as {what_to_teach_article}: {image_string} ''' #Save the readme to a file readme_file = open("README.md", "w") readme_file.write(readme_text) readme_file.close() #Save the token identifier to a file text_file = open("token_identifier.txt", "w") text_file.write(placeholder_token) text_file.close() #Save the type of teached thing to a file type_file = open("type_of_concept.txt","w") type_file.write(what_to_teach) type_file.close() operations = [ CommitOperationAdd(path_in_repo="learned_embeds.bin", path_or_fileobj=f"{output_dir}/learned_embeds.bin"), CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"), CommitOperationAdd(path_in_repo="type_of_concept.txt", path_or_fileobj="type_of_concept.txt"), CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"), ] create_repo(repo_id,private=True, token=hf_token) api = HfApi() api.create_commit( repo_id=repo_id, operations=operations, commit_message=f"Upload the concept {name_of_your_concept} embeds and token", token=hf_token ) api.upload_folder( folder_path=save_path, path_in_repo="concept_images", repo_id=repo_id, token=hf_token ) #@title Set up the pipeline from diffusers import DPMSolverMultistepScheduler pipe = StableDiffusionPipeline.from_pretrained( hyperparameters["output_dir"], scheduler=DPMSolverMultistepScheduler.from_pretrained(hyperparameters["output_dir"], subfolder="scheduler"), torch_dtype=torch.float16, ).to("cuda") #@title Run the Stable Diffusion pipeline #@markdown Don't forget to use the placeholder token in your prompt prompt = "a \u003Ccat-toy> inside ramen-bowl" #@param {type:"string"} num_samples = 2 #@param {type:"number"} num_rows = 1 #@param {type:"number"} all_images = [] for _ in range(num_rows): images = pipe([prompt] * num_samples, num_inference_steps=30, guidance_scale=7.5).images all_images.extend(images) grid = image_grid(all_images, num_rows, num_samples) grid<jupyter_output><empty_output>
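As a follow-up, here is a hedged sketch (not part of the original notebook) of how a saved `learned_embeds.bin` is typically loaded into a fresh pipeline without re-running training. The file path and prompt are assumptions; adapt them to where your embedding was saved:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load a plain copy of the base model, then graft the learned embedding onto it
pipe = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path)

learned_embeds = torch.load("learned_embeds.bin")        # {placeholder_token: embedding}
token, embedding = next(iter(learned_embeds.items()))

pipe.tokenizer.add_tokens(token)
pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
token_id = pipe.tokenizer.convert_tokens_to_ids(token)
pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embedding

pipe = pipe.to("cuda")
image = pipe(f"a {token} in space", num_inference_steps=30).images[0]
```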
notebooks/diffusers/sd_textual_inversion_training.ipynb/0
{ "file_path": "notebooks/diffusers/sd_textual_inversion_training.ipynb", "repo_id": "notebooks", "token_count": 10904 }
142
# this is a demo of inference of IDEFICS-9B which needs about 20GB of GPU memory import torch from transformers import IdeficsForVisionText2Text, AutoProcessor device = "cuda" if torch.cuda.is_available() else "cpu" checkpoint = "HuggingFaceM4/idefics-9b" #checkpoint = "HuggingFaceM4/tiny-random-idefics" model = IdeficsForVisionText2Text.from_pretrained(checkpoint, torch_dtype=torch.bfloat16).to(device) processor = AutoProcessor.from_pretrained(checkpoint) url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg" image = processor.image_processor.fetch_images(url) prompts = [ [ "User:", image, "Describe this image.\nAssistant: An image of two kittens in grass.\n", "User:", "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg", "Describe this image.\nAssistant:", ], [ "User:", "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg", "Describe this image.\nAssistant: An image of a dog wearing funny glasses.\n", "User:", "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg", "Describe this image.\nAssistant:", ], [ "User:", image, "Describe this image.\nAssistant: An image of two kittens in grass.\n", "User:", "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg", "Describe this image.\nAssistant:", ], [ "User:", "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg", "Describe this image.\nAssistant: An image of a dog.\n", "User:", "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg", "Describe this image.\nAssistant:", ], ] # batched mode inputs = processor(prompts, return_tensors="pt").to(device) # single sample mode #inputs = processor(prompts[0], return_tensors="pt").to(device) generated_ids = model.generate(**inputs, max_length=128) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) for i,t in enumerate(generated_text): print(f"{i}:\n{t}\n")
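One refinement that commonly appears in IDEFICS generation examples (hedged: verify the exact special-token strings against this checkpoint's tokenizer before relying on them) is banning the image placeholder tokens so they never show up in the generated text:

```python
# Hedged sketch: keep image placeholder tokens out of the generated text
bad_words_ids = processor.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
generated_ids = model.generate(**inputs, max_length=128, bad_words_ids=bad_words_ids)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
```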
notebooks/examples/idefics/inference.py/0
{ "file_path": "notebooks/examples/idefics/inference.py", "repo_id": "notebooks", "token_count": 980 }
143
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers as well as some other libraries. Uncomment the following cell and run it.<jupyter_code>#! pip install transformers evaluate datasets requests pandas sklearn<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up here if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("protein_language_modeling_notebook", framework="tensorflow")<jupyter_output><empty_output><jupyter_text>Fine-Tuning Protein Language Models In this notebook, we're going to do some transfer learning to fine-tune some large, pre-trained protein language models on tasks of interest. If that sentence feels a bit intimidating to you, don't panic - there's [a blog post](https://huggingface.co/blog/deep-learning-with-proteins) that explains the concepts here in much more detail.The specific model we're going to use is ESM-2, which is the state-of-the-art protein language model at the time of writing (November 2022). The citation for this model is [Lin et al, 2022](https://www.biorxiv.org/content/10.1101/2022.07.20.500902v1).There are several ESM-2 checkpoints with differing model sizes. Larger models will generally have better accuracy, but they require more GPU memory and will take much longer to train. The available ESM-2 checkpoints (at time of writing) are:| Checkpoint name | Num layers | Num parameters ||------------------------------|----|----------|| `esm2_t48_15B_UR50D` | 48 | 15B || `esm2_t36_3B_UR50D` | 36 | 3B | | `esm2_t33_650M_UR50D` | 33 | 650M | | `esm2_t30_150M_UR50D` | 30 | 150M | | `esm2_t12_35M_UR50D` | 12 | 35M | | `esm2_t6_8M_UR50D` | 6 | 8M | Note that the larger checkpoints may be very difficult to train without a large cloud GPU like an A100 or H100, and the largest 15B parameter checkpoint will probably be impossible to train on **any** single GPU! Also, note that memory usage for attention during training will scale as `O(batch_size * num_layers * seq_len^2)`, so larger models on long sequences will use quite a lot of memory! We will use the `esm2_t12_35M_UR50D` checkpoint for this notebook, which should train on any Colab instance or modern GPU.<jupyter_code>model_checkpoint = "facebook/esm2_t12_35M_UR50D"<jupyter_output><empty_output><jupyter_text>Sequence classification One of the most common tasks you can perform with a language model is **sequence classification**. 
In sequence classification, we classify an entire protein into a category, from a list of two or more possibilities. There's no limit on the number of categories you can use, or the specific problem you choose, as long as it's something the model could in theory infer from the raw protein sequence. To keep things simple for this example, though, let's try classifying proteins by their cellular localization - given their sequence, can we predict if they're going to be found in the cytosol (the fluid inside the cell) or embedded in the cell membrane? Data preparation In this section, we're going to gather some training data from UniProt. Our goal is to create a pair of lists: `sequences` and `labels`. `sequences` will be a list of protein sequences, which will just be strings like "MNKL...", where each letter represents a single amino acid in the complete protein. `labels` will be a list of the category for each sequence. The categories will just be integers, with 0 representing the first category, 1 representing the second and so on. In other words, if `sequences[i]` is a protein sequence then `labels[i]` should be its corresponding category. These will form the **training data** we're going to use to teach the model the task we want it to do.If you're adapting this notebook for your own use, this will probably be the main section you want to change! You can do whatever you want here, as long as you create those two lists by the end of it. If you want to follow along with this example, though, first we'll need to `import requests` and set up our query to UniProt.<jupyter_code>import requests query_url ="https://rest.uniprot.org/uniprotkb/stream?compressed=true&fields=accession%2Csequence%2Ccc_subcellular_location&format=tsv&query=%28%28organism_id%3A9606%29%20AND%20%28reviewed%3Atrue%29%20AND%20%28length%3A%5B80%20TO%20500%5D%29%29"<jupyter_output><empty_output><jupyter_text>This query URL might seem mysterious, but it isn't! To get it, we searched for `(organism_id:9606) AND (reviewed:true) AND (length:[80 TO 500])` on UniProt to get a list of reasonably-sized human proteins,then selected 'Download', and set the format to TSV and the columns to `Sequence` and `Subcellular location [CC]`, since those contain the data we care about for this task.Once that's done, selecting `Generate URL for API` gives you a URL you can pass to Requests. Alternatively, if you're not on Colab you can just download the data through the web interface and open the file locally.<jupyter_code>uniprot_request = requests.get(query_url)<jupyter_output><empty_output><jupyter_text>To get this data into Pandas, we use a `BytesIO` object, which Pandas will treat like a file. If you downloaded the data as a file you can skip this bit and just pass the filepath directly to `read_csv`.<jupyter_code>from io import BytesIO import pandas bio = BytesIO(uniprot_request.content) df = pandas.read_csv(bio, compression='gzip', sep='\t') df<jupyter_output><empty_output><jupyter_text>Nice! Now we have some proteins and their subcellular locations. Let's start filtering this down. First, let's ditch the columns without subcellular location information.<jupyter_code>df = df.dropna() # Drop proteins with missing columns<jupyter_output><empty_output><jupyter_text>Now we'll make one dataframe of proteins that contain `cytosol` or `cytoplasm` in their subcellular localization column, and a second that mentions the `membrane` or `cell membrane`. 
To ensure we don't get overlap, we ensure each dataframe only contains proteins that don't match the other search term.<jupyter_code>cytosolic = df['Subcellular location [CC]'].str.contains("Cytosol") | df['Subcellular location [CC]'].str.contains("Cytoplasm") membrane = df['Subcellular location [CC]'].str.contains("Membrane") | df['Subcellular location [CC]'].str.contains("Cell membrane") cytosolic_df = df[cytosolic & ~membrane] cytosolic_df membrane_df = df[membrane & ~cytosolic] membrane_df<jupyter_output><empty_output><jupyter_text>We're almost done! Now, let's make a list of sequences from each df and generate the associated labels. We'll use `0` as the label for cytosolic proteins and `1` as the label for membrane proteins.<jupyter_code>cytosolic_sequences = cytosolic_df["Sequence"].tolist() cytosolic_labels = [0 for protein in cytosolic_sequences] membrane_sequences = membrane_df["Sequence"].tolist() membrane_labels = [1 for protein in membrane_sequences]<jupyter_output><empty_output><jupyter_text>Now we can concatenate these lists together to get the `sequences` and `labels` lists that will form our final training data. Don't worry - they'll get shuffled during training!<jupyter_code>sequences = cytosolic_sequences + membrane_sequences labels = cytosolic_labels + membrane_labels # Quick check to make sure we got it right len(sequences) == len(labels)<jupyter_output><empty_output><jupyter_text>Phew! Splitting the data Since the data we're loading isn't prepared for us as a machine learning dataset, we'll have to split the data into train and test sets ourselves! We can use sklearn's function for that:<jupyter_code>from sklearn.model_selection import train_test_split train_sequences, test_sequences, train_labels, test_labels = train_test_split(sequences, labels, test_size=0.25, shuffle=True)<jupyter_output><empty_output><jupyter_text>Tokenizing the data All inputs to neural nets must be numerical. The process of converting strings into numerical indices suitable for a neural net is called **tokenization**. For natural language this can be quite complex, as usually the network's vocabulary will not contain every possible word, which means the tokenizer must handle splitting rarer words into pieces, as well as all the complexities of capitalization and unicode characters and so on.With proteins, however, things are very easy. In protein language models, each amino acid is converted to a single token. Every model on `transformers` comes with an associated `tokenizer` that handles tokenization for it, and protein language models are no different. Let's get our tokenizer!<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output>2022-11-15 18:12:47.607429: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. 
2022-11-15 18:12:47.713337: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered 2022-11-15 18:12:48.149628: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory 2022-11-15 18:12:48.149671: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or [...]<jupyter_text>Let's try a single sequence to see what the outputs from our tokenizer look like:<jupyter_code>tokenizer(train_sequences[0])<jupyter_output><empty_output><jupyter_text>This looks good! We can see that our sequence has been converted into `input_ids`, which is the tokenized sequence, and an `attention_mask`. The attention mask handles the case when we have sequences of variable length - in those cases, the shorter sequences are padded with blank "padding" tokens, and the attention mask is padded with 0s to indicate that those tokens should be ignored by the model.So now, let's tokenize our whole dataset. Note that we don't need to do anything with the labels, as they're already in the format we need.<jupyter_code>train_tokenized = tokenizer(train_sequences) test_tokenized = tokenizer(test_sequences)<jupyter_output><empty_output><jupyter_text>Dataset creation Now we want to turn this data into a dataset that Keras can load samples from. We can use the HuggingFace `Dataset` class for this, which has convenience functions to wrap itself with a `tf.data.Dataset`, although there are a number of different approaches that you can take at this stage.<jupyter_code>from datasets import Dataset train_dataset = Dataset.from_dict(train_tokenized) test_dataset = Dataset.from_dict(test_tokenized) train_dataset<jupyter_output><empty_output><jupyter_text>This looks good, but we're missing our labels! Let's add those on as an extra column to the datasets.<jupyter_code>train_dataset = train_dataset.add_column("labels", train_labels) test_dataset = test_dataset.add_column("labels", test_labels) train_dataset<jupyter_output><empty_output><jupyter_text>Looks good! We're ready for training. Model loading Next, we want to load our model. Make sure to use exactly the same model as you used when loading the tokenizer, or your model might not understand the tokenization scheme you're using!<jupyter_code>from transformers import TFAutoModelForSequenceClassification num_labels = max(train_labels + test_labels) + 1 # Add 1 since 0 can be a label print("Num labels:", num_labels) model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)<jupyter_output>Num labels: 2<jupyter_text>These warnings are telling us that the model is discarding some weights that it used for language modelling (the `lm_head`) and adding some weights for sequence classification (the `classifier`). This is exactly what we expect when we want to fine-tune a language model on a sequence classification task!Next, let's prepare our `tf.data.Dataset`. 
This Dataset will stream samples from our Huggingface `Dataset` in a way that Keras natively understands - once we've created it, we can pass it straight to `model.fit()`!<jupyter_code>tf_train_set = model.prepare_tf_dataset( train_dataset, batch_size=8, shuffle=True, tokenizer=tokenizer ) tf_test_set = model.prepare_tf_dataset( test_dataset, batch_size=8, shuffle=False, tokenizer=tokenizer )<jupyter_output><empty_output><jupyter_text>You might wonder why we pass along the `tokenizer` when we already preprocessed our data. This is because we will use it one last time to make all the samples we gather the same length by applying padding, which requires knowing the model's preferences regarding padding (to the left or right? with which token?). The `tokenizer` has a `pad()` method that will do all of this right for us, and `prepare_tf_dataset` will use it.Now all we need to do is compile our model. We use the `AdamWeightDecay` optimizer, which usually performs a little better than the base `Adam` optimizer.<jupyter_code>from transformers import AdamWeightDecay model.compile(optimizer=AdamWeightDecay(2e-5), metrics=["accuracy"])<jupyter_output>No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>And now we can fit our model!<jupyter_code>model.fit(tf_train_set, validation_data=tf_test_set, epochs=3)<jupyter_output>Epoch 1/3 475/475 [==============================] - 98s 180ms/step - loss: 0.2361 - accuracy: 0.9250 - val_loss: 0.1592 - val_accuracy: 0.9504 Epoch 2/3 475/475 [==============================] - 84s 176ms/step - loss: 0.1393 - accuracy: 0.9534 - val_loss: 0.1941 - val_accuracy: 0.9417 Epoch 3/3 475/475 [==============================] - 83s 174ms/step - loss: 0.0987 - accuracy: 0.9647 - val_loss: 0.1547 - val_accuracy: 0.9504<jupyter_text>Nice! After three epochs we have a model accuracy of ~94%. Note that we didn't do a lot of work to filter the training data or tune hyperparameters for this experiment, and also that we used one of the smallest ESM-2 models. With a larger starting model and more effort to ensure that the training data categories were cleanly separable, accuracy could almost certainly go a lot higher!Now that we're done, let's see how we can upload our model to the HuggingFace Hub. This step is optional, but will allow us to easily share it with other researchers. If you encounter any errors here, make sure you ran the login cell at the top of the notebook! First, let's set a couple of properties on our model. 
This is optional, but will ensure the model knows the names of its categories, rather than just outputting "0" or "1".<jupyter_code>model.label2id = {"cytosol": 0, "membrane": 1} model.id2label = {val: key for key, val in model.label2id.items()}<jupyter_output><empty_output><jupyter_text>Now we can push it to the hub as simply as...<jupyter_code>model_name = model_checkpoint.split('/')[-1] finetuned_model_name = f"{model_name}-finetuned-cytosol-membrane-classification" model.push_to_hub(finetuned_model_name) tokenizer.push_to_hub(finetuned_model_name)<jupyter_output><empty_output><jupyter_text>If you used the code above, you can now share this model with all your friends, family or favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```pythonfrom transformers import TFAutoModelForSequenceClassificationmodel = TFAutoModelForSequenceClassification.from_pretrained("your-username/my-awesome-model")``` *** Token classification Another common language model task is **token classification**. In this task, instead of classifying the whole sequence into a single category, we categorize each token (amino acid, in this case!) into one or more categories. This kind of model could be useful for:- Predicting secondary structure- Predicting buried vs. exposed residues- Predicting residues that will receive post-translational modifications- Predicting residues involved in binding pockets or active sites- Probably several other things, it's been a while since I was a postdoc Data preparation In this section, we're going to gather some training data from UniProt. As in the sequence classification example, we aim to create two lists: `sequences` and `labels`. Unlike in that example, however, the `labels` are more than just single integers. Instead, the label for each sample will be **one integer per token in the input**. This should make sense - when we do token classification, different tokens in the input may have different categories!To demonstrate token classification, we're going to go back to UniProt and get some data on protein secondary structures. As above, this will probably the main section you want to change when adapting this code to your own problems.<jupyter_code>import requests query_url ="https://rest.uniprot.org/uniprotkb/stream?compressed=true&fields=accession%2Csequence%2Cft_strand%2Cft_helix&format=tsv&query=%28%28organism_id%3A9606%29%20AND%20%28reviewed%3Atrue%29%20AND%20%28length%3A%5B80%20TO%20500%5D%29%29"<jupyter_output><empty_output><jupyter_text>This time, our UniProt search was `(organism_id:9606) AND (reviewed:true) AND (length:[100 TO 1000])` as it was in the first example, but instead of `Subcellular location [CC]` we take the `Helix` and `Beta strand` columns, as they contain the secondary structure information we want.<jupyter_code>uniprot_request = requests.get(query_url)<jupyter_output><empty_output><jupyter_text>To get this data into Pandas, we use a `BytesIO` object, which Pandas will treat like a file. 
If you downloaded the data as a file you can skip this bit and just pass the filepath directly to `read_csv`.<jupyter_code>from io import BytesIO import pandas bio = BytesIO(uniprot_request.content) df = pandas.read_csv(bio, compression='gzip', sep='\t') df<jupyter_output><empty_output><jupyter_text>Since not all proteins have this structural information, we discard proteins that have no annotated beta strands or alpha helices.<jupyter_code>no_structure_rows = df["Beta strand"].isna() & df["Helix"].isna() df = df[~no_structure_rows] df<jupyter_output><empty_output><jupyter_text>Well, this works, but that data still isn't in a clean format that we can use to build our labels. Let's take a look at one sample to see what exactly we're dealing with:<jupyter_code>df.iloc[0]["Helix"]<jupyter_output><empty_output><jupyter_text>We'll need to use a [regex](https://docs.python.org/3/howto/regex.html) to pull out each segment that's marked as being a STRAND or HELIX. What we're asking for is a list of everywhere we see the word STRAND or HELIX followed by two numbers separated by two dots. In each case where this pattern is found, we tell the regex to extract the two numbers as a tuple for us.<jupyter_code>import re strand_re = r"STRAND\s(\d+)\.\.(\d+)\;" helix_re = r"HELIX\s(\d+)\.\.(\d+)\;" re.findall(helix_re, df.iloc[0]["Helix"])<jupyter_output><empty_output><jupyter_text>Looks good! We can use this to build our training data. Recall that the **labels** need to be a list or array of integers that's the same length as the input sequence. We're going to use 0 to indicate residues without any annotated structure, 1 for residues in an alpha helix, and 2 for residues in a beta strand. To build that, we'll start with an array of all 0s, and then fill in values based on the positions that our regex pulls out of the UniProt results.We'll use NumPy arrays rather than lists here, since these allow [slice assignment](https://numpy.org/doc/stable/user/basics.indexing.htmlassigning-values-to-indexed-arrays), which will be a lot simpler than editing a list of integers. Note also that UniProt annotates residues starting from 1 (unlike Python, which starts from 0), and region annotations are inclusive (so 1..3 means residues 1, 2 and 3). 
To turn these into Python slices, we subtract 1 from the start of each annotation, but not the end.<jupyter_code>import numpy as np def build_labels(sequence, strands, helices): # Start with all 0s labels = np.zeros(len(sequence), dtype=np.int64) if isinstance(helices, float): # Indicates missing (NaN) found_helices = [] else: found_helices = re.findall(helix_re, helices) for helix_start, helix_end in found_helices: helix_start = int(helix_start) - 1 helix_end = int(helix_end) assert helix_end <= len(sequence) labels[helix_start: helix_end] = 1 # Helix category if isinstance(strands, float): # Indicates missing (NaN) found_strands = [] else: found_strands = re.findall(strand_re, strands) for strand_start, strand_end in found_strands: strand_start = int(strand_start) - 1 strand_end = int(strand_end) assert strand_end <= len(sequence) labels[strand_start: strand_end] = 2 # Strand category return labels<jupyter_output><empty_output><jupyter_text>Now we've defined a helper function, let's build our lists of sequences and labels:<jupyter_code>sequences = [] labels = [] for row_idx, row in df.iterrows(): row_labels = build_labels(row["Sequence"], row["Beta strand"], row["Helix"]) sequences.append(row["Sequence"]) labels.append(row_labels)<jupyter_output><empty_output><jupyter_text>Creating our dataset Nice! Now we'll split and tokenize the data, and then create datasets - I'll go through this quite quickly here, since it's identical to how we did it in the sequence classification example above.<jupyter_code>from sklearn.model_selection import train_test_split train_sequences, test_sequences, train_labels, test_labels = train_test_split(sequences, labels, test_size=0.25, shuffle=True) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) train_tokenized = tokenizer(train_sequences) test_tokenized = tokenizer(test_sequences) from datasets import Dataset train_dataset = Dataset.from_dict(train_tokenized) test_dataset = Dataset.from_dict(test_tokenized) train_dataset = train_dataset.add_column("labels", train_labels) test_dataset = test_dataset.add_column("labels", test_labels)<jupyter_output><empty_output><jupyter_text>Model loading The key difference here with the above example is that we use `TFAutoModelForTokenClassification` instead of `TFAutoModelForSequenceClassification`. We will also need a `data_collator` this time, as we're in the slightly more complex case where both inputs and labels must be padded in each batch.<jupyter_code>from transformers import TFAutoModelForTokenClassification num_labels = 3 model = TFAutoModelForTokenClassification.from_pretrained(model_checkpoint, num_labels=num_labels) from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="np")<jupyter_output><empty_output><jupyter_text>Now we create our `tf.data.Dataset` objects as before. Remember to pass the data collator, though! 
Note that when you pass a data collator, there's no need to pass your tokenizer, as the data collator is handling padding for us.<jupyter_code>tf_train_set = model.prepare_tf_dataset( train_dataset, batch_size=8, shuffle=True, collate_fn=data_collator ) tf_test_set = model.prepare_tf_dataset( test_dataset, batch_size=8, shuffle=False, collate_fn=data_collator )<jupyter_output>/home/matt/PycharmProjects/transformers/src/transformers/tokenization_utils_base.py:715: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray. tensor = as_tensor(value)<jupyter_text>Our metrics are bit more complex than in the sequence classification task, as we need to ignore padding tokens (those where the label is `-100`). This means we'll need our own metric function where we only compute accuracy on non-padding tokens.<jupyter_code>from transformers import AdamWeightDecay import tensorflow as tf def masked_accuracy(y_true, y_pred): predictions = tf.math.argmax(y_pred, axis=-1) # Highest logit corresponds to predicted category numerator = tf.math.count_nonzero((predictions == tf.cast(y_true, predictions.dtype)) & (y_true != -100), dtype=tf.float32) denominator = tf.math.count_nonzero(y_true != -100, dtype=tf.float32) return numerator / denominator model.compile(optimizer=AdamWeightDecay(2e-5), metrics=[masked_accuracy])<jupyter_output>No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>And now we're ready to train our model!<jupyter_code>model.fit(tf_train_set, validation_data=tf_test_set, epochs=3)<jupyter_output>Epoch 1/3 366/366 [==============================] - 78s 184ms/step - loss: 0.5809 - masked_accuracy: 0.7502 - val_loss: 0.4764 - val_masked_accuracy: 0.8023 Epoch 2/3 366/366 [==============================] - 65s 177ms/step - loss: 0.4534 - masked_accuracy: 0.8132 - val_loss: 0.4564 - val_masked_accuracy: 0.8115 Epoch 3/3 366/366 [==============================] - 64s 176ms/step - loss: 0.4108 - masked_accuracy: 0.8325 - val_loss: 0.4586 - val_masked_accuracy: 0.8119<jupyter_text>This definitely seems harder than the first task, but we still attain a very respectable accuracy. Remember that to keep this demo lightweight, we used one of the smallest ESM models, focused on human proteins only and didn't put a lot of work into making sure we only included completely-annotated proteins in our training set. With a bigger model and a cleaner, broader training set, accuracy on this task could definitely go a lot higher! Now, let's push this model to the hub as we did before, while also setting the category labels appropriately.<jupyter_code>model.label2id = {"unstructured": 0, "helix": 1, "strand": 2} model.id2label = {val: key for key, val in model.label2id.items()} model_name = model_checkpoint.split('/')[-1] finetuned_model_name = f"{model_name}-finetuned-secondary-structure-classification" model.push_to_hub(finetuned_model_name) tokenizer.push_to_hub(finetuned_model_name)<jupyter_output><empty_output>
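<jupyter_text>As an optional final check (this is just a minimal sketch, not part of the training recipe above), we could reload the fine-tuned model and predict the secondary structure of one of our test sequences. Note that `finetuned_model_name` may need to be prefixed with your Hub username if you load it from the Hub rather than from a local directory, and that the first and last predictions typically correspond to the tokenizer's special tokens rather than amino acids.<jupyter_code>import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

# reload the fine-tuned model; depending on where it was saved you may need
# "your-username/..." as the identifier instead of the bare name
reloaded_tokenizer = AutoTokenizer.from_pretrained(finetuned_model_name)
reloaded_model = TFAutoModelForTokenClassification.from_pretrained(finetuned_model_name)

# tokenize one test sequence and take the argmax over the logits at each position
inputs = reloaded_tokenizer(test_sequences[0], return_tensors="tf")
logits = reloaded_model(**inputs).logits
predicted_classes = tf.math.argmax(logits, axis=-1)[0].numpy()
print(predicted_classes)<jupyter_output><empty_output>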
notebooks/examples/protein_language_modeling-tf.ipynb/0
{ "file_path": "notebooks/examples/protein_language_modeling-tf.ipynb", "repo_id": "notebooks", "token_count": 8412 }
144
<jupyter_start><jupyter_text>Quantizing a model during fine-tuning with Intel Neural Compressor (INC) for text classification tasks This notebook shows how to apply quantization aware training, using the [Intel Neural Compressor](https://github.com/intel/neural-compressor) (INC) library, for any tasks of the GLUE benchmark. This is made possible thanks to 🤗 [Optimum Intel](https://github.com/huggingface/optimum-intel), an extension of 🤗 [Transformers](https://github.com/huggingface/transformers), providing a set of performance optimization tools enabling maximum efficiency to accelerate end-to-end pipelines on a variety of Intel processors. If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers, 🤗 Datasets and 🤗 Optimum. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets transformers optimum[neural-compressor]<jupyter_output><empty_output><jupyter_text>Make sure your version of 🤗 Optimum is at least 1.6.0 since the functionality was introduced in that version:<jupyter_code>from optimum.intel.version import __version__ print(__version__)<jupyter_output>1.7.0.dev0<jupyter_text>The GLUE Benchmark is a group of nine classification tasks on sentences or pairs of sentences which are:- [CoLA](https://nyu-mll.github.io/CoLA/) (Corpus of Linguistic Acceptability) Determine if a sentence is grammatically correct or not.- [MNLI](https://arxiv.org/abs/1704.05426) (Multi-Genre Natural Language Inference) Determine if a sentence entails, contradicts or is unrelated to a given hypothesis. This dataset has two versions, one with the validation and test set coming from the same distribution, another called mismatched where the validation and test use out-of-domain data.- [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) (Microsoft Research Paraphrase Corpus) Determine if two sentences are paraphrases from one another or not.- [QNLI](https://rajpurkar.github.io/SQuAD-explorer/) (Question-answering Natural Language Inference) Determine if the answer to a question is in the second sentence or not. This dataset is built from the SQuAD dataset.- [QQP](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs) (Quora Question Pairs2) Determine if two questions are semantically equivalent or not.- [RTE](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment) (Recognizing Textual Entailment) Determine if a sentence entails a given hypothesis or not.- [SST-2](https://nlp.stanford.edu/sentiment/index.html) (Stanford Sentiment Treebank) Determine if the sentence has a positive or negative sentiment.- [STS-B](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) (Semantic Textual Similarity Benchmark) Determine the similarity of two sentences with a score from 1 to 5.- [WNLI](https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html) (Winograd Natural Language Inference) Determine if a sentence with an anonymous pronoun and a sentence with this pronoun replaced are entailed or not. 
This dataset is built from the Winograd Schema Challenge dataset.We will see how to apply post-training static quantization on a DistilBERT model fine-tuned on the SST-2 task:<jupyter_code>GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"] task = "sst2" model_checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" batch_size = 16 max_train_samples = 200 max_eval_samples = 200<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) and [🤗 Evaluate](https://github.com/huggingface/evaluate) libraries to download the data and get the metric we need to use for evaluation. This can be easily done with the functions `load_dataset` and `load`.Apart from `mnli-mm` being a special code, we can directly pass our task name to those functions. `load_dataset` will cache the dataset to avoid downloading it again the next time you run this cell.<jupyter_code>import evaluate from datasets import load_dataset actual_task = "mnli" if task == "mnli-mm" else task dataset = load_dataset("glue", actual_task) metric = evaluate.load("glue", actual_task)<jupyter_output>Found cached dataset glue (/home/ella/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)<jupyter_text>Note that `load` has loaded the proper metric associated to your task, which is:- for CoLA: [Matthews Correlation Coefficient](https://en.wikipedia.org/wiki/Matthews_correlation_coefficient)- for MNLI (matched or mismatched): Accuracy- for MRPC: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for QNLI: Accuracy- for QQP: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for RTE: Accuracy- for SST-2: Accuracy- for STS-B: [Pearson Correlation Coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) and [Spearman's_Rank_Correlation_Coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)- for WNLI: Accuracyso the metric object only computes the one(s) needed for your task. We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("text_classification_quantization_inc_notebook", framework="none")<jupyter_output><empty_output><jupyter_text>Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. 
This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure that:- we get a tokenizer that corresponds to the model architecture we want to use- we download the vocabulary used when pretraining this specific checkpointThat vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>To preprocess our dataset, we will thus need the names of the columns containing the sentence(s). The following dictionary keeps track of the correspondence task to column names:<jupyter_code>task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mnli-mm": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), }<jupyter_output><empty_output><jupyter_text>We can double check it does work on our current dataset:<jupyter_code>sentence1_key, sentence2_key = task_to_keys[task] if sentence2_key is None: print(f"Sentence: {dataset['train'][0][sentence1_key]}") else: print(f"Sentence 1: {dataset['train'][0][sentence1_key]}") print(f"Sentence 2: {dataset['train'][0][sentence2_key]}")<jupyter_output>Sentence: hide new secretions from the parental units<jupyter_text>We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer than what the model selected can handle will be truncated to the maximum length accepted by the model.<jupyter_code>max_seq_length = min(128, tokenizer.model_max_length) padding = "max_length" def preprocess_function(examples): args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) return tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. 
This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>encoded_dataset = dataset.map(preprocess_function, batched=True)<jupyter_output>Loading cached processed dataset at /home/ella/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-2f40245b2230ff65.arrow Loading cached processed dataset at /home/ella/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-df1324c32baf5c62.arrow Loading cached processed dataset at /home/ella/.cache/huggingface/datasets/glue/sst2/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-83b8433647ed2f99.arrow<jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Applying quantization on the model Quantization aware training simulates the effects of quantization during training in order to alleviate its effects on the model's performance.Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about sentence classification, we use the `AutoModelForSequenceClassification` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which is always 2, except for STS-B which is a regression problem and MNLI where we have 3 labels):<jupyter_code>from transformers import AutoModelForSequenceClassification, TrainingArguments, default_data_collator model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>The `INCTrainer` class provides an API to train your model while combining different compression techniques such as knowledge distillation, pruning and quantization. The `INCTrainer` is very similar to the 🤗 Transformers `Trainer`, which can be replaced with minimal changes in your code. In addition to the usual To instantiate an `INCTrainer`, we will need to define three more things. First, we need to create the quantization configuration describing the quantization proccess we wish to apply. Quantization will be applied on the embeddings, on the linear layers as well as on their corresponding input activations.<jupyter_code>from neural_compressor import QuantizationAwareTrainingConfig quantization_config = QuantizationAwareTrainingConfig()<jupyter_output><empty_output><jupyter_text>[`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments) is a class that contains all the attributes to customize the training. 
It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>metric_name = "pearson" if task == "stsb" else "matthews_correlation" if task == "cola" else "accuracy" save_directory = f"{model_checkpoint.split('/')[-1]}-finetuned-{task}" args = TrainingArguments( output_dir = save_directory, do_train=True, do_eval=False, evaluation_strategy = "epoch", save_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=1, weight_decay=0.01, load_best_model_at_end=True, metric_for_best_model=metric_name, )<jupyter_output><empty_output><jupyter_text>The last thing to define for our `INCTrainer` is how to compute the metrics from the predictions. We need to define a function for this, which will just use the `metric` we loaded earlier, the only preprocessing we have to do is to take the argmax of our predicted logits (our just squeeze the last axis in the case of STS-B):<jupyter_code>import numpy as np def compute_metrics(eval_pred): predictions, labels = eval_pred if task != "stsb": predictions = np.argmax(predictions, axis=1) else: predictions = predictions[:, 0] return metric.compute(predictions=predictions, references=labels)<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `INCTrainer`:<jupyter_code>import copy from optimum.intel.neural_compressor import INCTrainer validation_key = "validation_mismatched" if task == "mnli-mm" else "validation_matched" if task == "mnli" else "validation" trainer = INCTrainer( model=model, quantization_config=quantization_config, task="sequence-classification", # optional : only needed to export the model to the ONNX format args=args, train_dataset=encoded_dataset["train"].select(range(max_train_samples)), eval_dataset=encoded_dataset[validation_key].select(range(max_eval_samples)), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=default_data_collator, ) fp_model = copy.deepcopy(model)<jupyter_output>2023-01-13 13:05:39 [WARNING] Force convert framework model to neural_compressor model.<jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output>/home/ella/miniconda3/envs/inc/lib/python3.9/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning warnings.warn( 2023-01-13 13:05:40 [INFO] Fx trace of the entire model failed. We will conduct auto quantization<jupyter_text>We can run evaluation by just calling the `evaluate` method:<jupyter_code>trainer.evaluate() import os import torch def get_model_size(model): torch.save(model.state_dict(), "tmp.pt") model_size = os.path.getsize("tmp.pt") / (1024*1024) os.remove("tmp.pt") return round(model_size, 2) fp_model_size = get_model_size(fp_model) q_model_size = get_model_size(trainer.model) print(f"The full-precision model size is {round(fp_model_size)} MB while the quantized model one is {round(q_model_size)} MB.") print(f"The quantized model is {round(fp_model_size / q_model_size, 2)}x smaller than the full-precision one.")<jupyter_output>The full-precision model size is 255 MB while the quantized model one is 65 MB. 
The quantized model is 3.93x smaller than the full-precision one.<jupyter_text>To save the resulting quantized model, you can use the `save_model` method. By setting `save_onnx_model` to `True`, the model will be additionally exported to the ONNX format.<jupyter_code>trainer.save_model(save_onnx_model=True)<jupyter_output>Configuration saved in distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2/config.json tokenizer config file saved in distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2/tokenizer_config.json Special tokens file saved in distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2/special_tokens_map.json 2023-01-13 13:07:10 [WARNING] QDQ format requires opset_version >= 13, we reset opset_version=13 here /home/ella/miniconda3/envs/inc/lib/python3.9/site-packages/transformers/models/distilbert/modeling_distilbert.py:217: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. mask, torch.tensor(torch.finfo(scores.dtype).min) 2023-01-13 13:07:12 [INFO] Weight type: QInt8. 2023-01-13 13:07:12 [INFO] Activation type: QUIn[...]<jupyter_text>Loading the quantized model You must instantiate your model using our [`INCModelForXxx`](https://huggingface.co/docs/optimum/main/intel/reference_incoptimum.intel.neural_compressor.INCModel) or [`ORTModelForXxx`](https://huggingface.co/docs/optimum/onnxruntime/package_reference/modeling_ort) classes to load your quantized PyTorch or ONNX model respectively, hosted locally or on the 🤗 hub:<jupyter_code>from optimum.intel.neural_compressor import INCModelForSequenceClassification from optimum.onnxruntime import ORTModelForSequenceClassification pytorch_model = INCModelForSequenceClassification.from_pretrained(save_directory) onnx_model = ORTModelForSequenceClassification.from_pretrained(save_directory)<jupyter_output>loading configuration file distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2/config.json Model config DistilBertConfig { "_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2", "activation": "gelu", "architectures": [ "DistilBertForSequenceClassification" ], "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "finetuning_task": "sst-2", "framework": "pytorch_fx", "hidden_dim": 3072, "id2label": { "0": "NEGATIVE", "1": "POSITIVE" }, "initializer_range": 0.02, "label2id": { "NEGATIVE": 0, "POSITIVE": 1 }, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "output_past": true, "pad_token_id": 0, "problem_type": "single_label_classification", "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "tie_weights_": true, "torch_dtype": "int8", "transformers_version": "4.25.1", "vocab_size": 30522 } loading config[...]
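<jupyter_text>As a quick sanity check (a minimal sketch, not part of the original workflow), we can wrap the exported ONNX model in a standard `transformers` pipeline and classify a made-up example sentence:<jupyter_code>from transformers import pipeline

# run the quantized ONNX model through a regular text-classification pipeline
onnx_classifier = pipeline("text-classification", model=onnx_model, tokenizer=tokenizer)
print(onnx_classifier("I really enjoyed this movie, the acting was excellent!"))<jupyter_output><empty_output>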
notebooks/examples/text_classification_quantization_inc.ipynb/0
{ "file_path": "notebooks/examples/text_classification_quantization_inc.ipynb", "repo_id": "notebooks", "token_count": 5868 }
145
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Getting Started Demo Binary Classification with `Trainer` and `imdb` dataset 1. [Introduction](Introduction) 2. [Development Environment and Permissions](Development-Environment-and-Permissions) 1. [Installation](Installation) 2. [Development environment](Development-environment) 3. [Permissions](Permissions)3. [Processing](Preprocessing) 1. [Tokenization](Tokenization) 2. [Uploading data to sagemaker_session_bucket](Uploading-data-to-sagemaker_session_bucket) 4. [Fine-tuning & starting Sagemaker Training Job](Fine-tuning-\&-starting-Sagemaker-Training-Job) 1. [Creating an Estimator and start a training job](Creating-an-Estimator-and-start-a-training-job) 2. [Estimator Parameters](Estimator-Parameters) 3. [Download fine-tuned model from s3](Download-fine-tuned-model-from-s3) 3. [Attach to old training job to an estimator ](Attach-to-old-training-job-to-an-estimator) 5. [_Coming soon_:Push model to the Hugging Face hub](Push-model-to-the-Hugging-Face-hub) IntroductionWelcome to our end-to-end binary Text-Classification example. In this demo, we will use the Hugging Faces `transformers` and `datasets` library together with a custom Amazon sagemaker-sdk extension to fine-tune a pre-trained transformer on binary text classification. In particular, the pre-trained model will be fine-tuned using the `imdb` dataset. To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. _**NOTE: You can run this demo in Sagemaker Studio, your local machine or Sagemaker Notebook Instances**_ Development Environment and Permissions Installation_*Note:* we only install the required libraries from Hugging Face and AWS. You also need PyTorch or Tensorflow, if you haven´t it installed_<jupyter_code>!pip install "sagemaker>=2.140.0" "transformers==4.26.1" "datasets[s3]==2.10.1" --upgrade<jupyter_output><empty_output><jupyter_text>Development environment<jupyter_code>import sagemaker.huggingface<jupyter_output><empty_output><jupyter_text>Permissions _If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>PreprocessingWe are using the `datasets` library to download and preprocess the `imdb` dataset. After preprocessing, the dataset will be uploaded to our `sagemaker_session_bucket` to be used within our training job. The [imdb](http://ai.stanford.edu/~amaas/data/sentiment/) dataset consists of 25000 training and 25000 testing highly polar movie reviews. 
Tokenization<jupyter_code>from datasets import load_dataset from transformers import AutoTokenizer # tokenizer used in preprocessing tokenizer_name = 'distilbert-base-uncased' # dataset used dataset_name = 'imdb' # s3 key prefix for the data s3_prefix = 'samples/datasets/imdb' # load dataset dataset = load_dataset(dataset_name) # download tokenizer tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) # tokenizer helper function def tokenize(batch): return tokenizer(batch['text'], padding='max_length', truncation=True) # load dataset train_dataset, test_dataset = load_dataset('imdb', split=['train', 'test']) test_dataset = test_dataset.shuffle().select(range(10000)) # smaller the size for test dataset to 10k # tokenize dataset train_dataset = train_dataset.map(tokenize, batched=True) test_dataset = test_dataset.map(tokenize, batched=True) # set format for pytorch train_dataset = train_dataset.rename_column("label", "labels") train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels']) test_dataset = test_dataset.rename_column("label", "labels") test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])<jupyter_output><empty_output><jupyter_text>Uploading data to `sagemaker_session_bucket`After we processed the `datasets` we are going to use the new `FileSystem` [integration](https://huggingface.co/docs/datasets/filesystems.html) to upload our dataset to S3.<jupyter_code># save train_dataset to s3 training_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/train' train_dataset.save_to_disk(training_input_path) # save test_dataset to s3 test_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/test' test_dataset.save_to_disk(test_input_path)<jupyter_output><empty_output><jupyter_text>Fine-tuning & starting Sagemaker Training JobIn order to create a sagemaker training job we need an `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. In a Estimator we define, which fine-tuning script should be used as `entry_point`, which `instance_type` should be used, which `hyperparameters` are passed in .....```pythonhuggingface_estimator = HuggingFace(entry_point='train.py', source_dir='./scripts', base_job_name='huggingface-sdk-extension', instance_type='ml.p3.2xlarge', instance_count=1, transformers_version='4.4', pytorch_version='1.6', py_version='py36', role=role, hyperparameters = {'epochs': 1, 'train_batch_size': 32, 'model_name':'distilbert-base-uncased' })```When we create a SageMaker training job, SageMaker takes care of starting and managing all the required ec2 instances for us with the `huggingface` container, uploads the provided fine-tuning script `train.py` and downloads the data from our `sagemaker_session_bucket` into the container at `/opt/ml/input/data`. Then, it starts the training job by running. ```python/opt/conda/bin/python train.py --epochs 1 --model_name distilbert-base-uncased --train_batch_size 32```The `hyperparameters` you define in the `HuggingFace` estimator are passed in as named arguments. Sagemaker is providing useful properties about the training environment through various environment variables, including the following:* `SM_MODEL_DIR`: A string that represents the path where the training job writes the model artifacts to. 
After training, artifacts in this directory are uploaded to S3 for model hosting.* `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host.* `SM_CHANNEL_XXXX:` A string that represents the path to the directory that contains the input data for the specified channel. For example, if you specify two input channels in the HuggingFace estimator’s fit call, named `train` and `test`, the environment variables `SM_CHANNEL_TRAIN` and `SM_CHANNEL_TEST` are set.To run your training job locally you can define `instance_type='local'` or `instance_type='local_gpu'` for gpu usage. _Note: this does not working within SageMaker Studio_<jupyter_code>!pygmentize ./scripts/train.py<jupyter_output><empty_output><jupyter_text>Creating an Estimator and start a training job<jupyter_code>from sagemaker.huggingface import HuggingFace # hyperparameters, which are passed into the training job hyperparameters={'epochs': 1, 'train_batch_size': 32, 'model_name':'distilbert-base-uncased' } huggingface_estimator = HuggingFace(entry_point='train.py', source_dir='./scripts', instance_type='ml.p3.2xlarge', instance_count=1, role=role, transformers_version='4.26', pytorch_version='1.13', py_version='py39', hyperparameters = hyperparameters) # starting the train job with our uploaded datasets as input huggingface_estimator.fit({'train': training_input_path, 'test': test_input_path})<jupyter_output><empty_output><jupyter_text>Deploying the endpointTo deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type.<jupyter_code>predictor = huggingface_estimator.deploy(1, "ml.g4dn.xlarge")<jupyter_output><empty_output><jupyter_text>Then, we use the returned predictor object to call the endpoint.<jupyter_code>sentiment_input= {"inputs":"I love using the new Inference DLC."} predictor.predict(sentiment_input)<jupyter_output><empty_output><jupyter_text>Finally, we delete the endpoint again.<jupyter_code>predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output><jupyter_text>Extras Estimator Parameters<jupyter_code># container image used for training job print(f"container image used for training job: \n{huggingface_estimator.image_uri}\n") # s3 uri where the trained model is located print(f"s3 uri where the trained model is located: \n{huggingface_estimator.model_data}\n") # latest training job name for this estimator print(f"latest training job name for this estimator: \n{huggingface_estimator.latest_training_job.name}\n") # access the logs of the training job huggingface_estimator.sagemaker_session.logs_for_job(huggingface_estimator.latest_training_job.name)<jupyter_output><empty_output><jupyter_text>Attach to old training job to an estimator In Sagemaker you can attach an old training job to an estimator to continue training, get results etc..<jupyter_code>from sagemaker.estimator import Estimator # job which is going to be attached to the estimator old_training_job_name='' # attach old training job huggingface_estimator_loaded = Estimator.attach(old_training_job_name) # get model output s3 from training job huggingface_estimator_loaded.model_data<jupyter_output><empty_output>
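<jupyter_text>Download fine-tuned model from s3 The table of contents above also lists downloading the fine-tuned model from S3. A minimal sketch of that step (assuming the training job above has finished; the local path is just an example) uses the `S3Downloader` utility from the SageMaker SDK:<jupyter_code>from sagemaker.s3 import S3Downloader

# download the model.tar.gz produced by the training job to a local folder (example path)
S3Downloader.download(
    s3_uri=huggingface_estimator.model_data, # S3 URI where the trained model artifact is located
    local_path="./my_model", # local path where the artifact will be saved
    sagemaker_session=sess, # SageMaker session used for training the model
)<jupyter_output><empty_output>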
notebooks/sagemaker/01_getting_started_pytorch/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/01_getting_started_pytorch/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 3851 }
146
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Deploy 🤗 Transformers for inference Welcome to this getting started guide, where we will use the new Hugging Face Inference DLCs and Amazon SageMaker Python SDK to deploy a transformer model for inference. In this example, we deploy a trained Hugging Face Transformer model to SageMaker for inference. API - [SageMaker Hugging Face Inference Toolkit](https://github.com/aws/sagemaker-huggingface-inference-toolkit) Using the `transformers pipelines`, we designed an API, which makes it easy for you to benefit from all `pipelines` features. The API is oriented at the API of the [🤗 Accelerated Inference API](https://api-inference.huggingface.co/docs/python/html/detailed_parameters.html), meaning your inputs need to be defined in the `inputs` key and if you want additional supported `pipelines` parameters you can add them in the `parameters` key. Below you can find examples for requests. **text-classification request body**```python{ "inputs": "Camera - You are awarded a SiPix Digital Camera! call 09061221066 fromm landline. Delivery within 28 days."}```**question-answering request body**```python{ "inputs": { "question": "What is used for inference?", "context": "My Name is Philipp and I live in Nuremberg. This model is used with sagemaker for inference." }}```**zero-shot classification request body**```python{ "inputs": "Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!", "parameters": { "candidate_labels": [ "refund", "legal", "faq" ] }}```<jupyter_code>!pip install "sagemaker>=2.48.0" --upgrade<jupyter_output><empty_output><jupyter_text>Deploy a Hugging Face Transformer model from S3 to SageMaker for inference There are two ways to deploy your SageMaker-trained Hugging Face model from S3. You can either deploy it after your training is finished or you can deploy it later using the `model_data` pointing to your saved model on S3. Deploy the model directly after trainingIf you deploy your model directly after training, you need to make sure that all required files are saved in your training script, including the Tokenizer and the Model. ```pythonfrom sagemaker.huggingface import HuggingFace pseudo code start create HuggingFace estimator for running traininghuggingface_estimator = HuggingFace(....) starting the train job with our uploaded datasets as inputhuggingface_estimator.fit(...) pseudo code end deploy model to SageMaker Inferencepredictor = huggingface_estimator.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge") example request, you always need to define "inputs"data = { "inputs": "Camera - You are awarded a SiPix Digital Camera! call 09061221066 fromm landline.
Delivery within 28 days."} requestpredictor.predict(data)``` Deploy the model using `model_data`<jupyter_code>import sagemaker import boto3 try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] print(f"sagemaker role arn: {role}") from sagemaker.huggingface import HuggingFaceModel # create Hugging Face Model Class huggingface_model = HuggingFaceModel( model_data="s3://hf-sagemaker-inference/model.tar.gz", # path to your trained sagemaker model role=role, # iam role with permissions to create an Endpoint transformers_version="4.26", # transformers version used pytorch_version="1.13", # pytorch version used py_version="py39", # python version of the DLC ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, instance_type="ml.m5.xlarge" ) # example request, you always need to define "inputs" data = { "inputs": "The new Hugging Face SageMaker DLC makes it super easy to deploy models in production. I love it!" } # request predictor.predict(data) # delete endpoint predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output>
notebooks/sagemaker/10_deploy_model_from_s3/deploy_transformer_model_from_s3.ipynb/0
{ "file_path": "notebooks/sagemaker/10_deploy_model_from_s3/deploy_transformer_model_from_s3.ipynb", "repo_id": "notebooks", "token_count": 1178 }
147
<jupyter_start><jupyter_text>Train LLMs using QLoRA on Amazon SageMakerIn this sagemaker example, we are going to learn how to apply [QLoRA: Efficient Finetuning of Quantized LLMs](https://arxiv.org/abs/2305.14314) to fine-tune Falcon 40B. QLoRA is an efficient finetuning technique that quantizes a pretrained language model to 4 bits and attaches small “Low-Rank Adapters” which are fine-tuned. This enables fine-tuning of models with up to 65 billion parameters on a single GPU; despite its efficiency, QLoRA matches the performance of full-precision fine-tuning and achieves state-of-the-art results on language tasks.In our example, we are going to leverage Hugging Face [Transformers](https://huggingface.co/docs/transformers/index), [Accelerate](https://huggingface.co/docs/accelerate/index), and [PEFT](https://github.com/huggingface/peft). In Detail you will learn how to:1. Setup Development Environment2. Load and prepare the dataset3. Fine-Tune Falcon 40B with QLoRA on Amazon SageMaker Quick intro: PEFT or Parameter Efficient Fine-tuning[PEFT](https://github.com/huggingface/peft), or Parameter Efficient Fine-tuning, is a new open-source library from Hugging Face to enable efficient adaptation of pre-trained language models (PLMs) to various downstream applications without fine-tuning all the model's parameters. PEFT currently includes techniques for:- (Q)LoRA: [LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf)- Prefix Tuning: [P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks](https://arxiv.org/pdf/2110.07602.pdf)- P-Tuning: [GPT Understands, Too](https://arxiv.org/pdf/2103.10385.pdf)- Prompt Tuning: [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/pdf/2104.08691.pdf)<jupyter_code>!pip install "transformers==4.30.2" "datasets[s3]==2.13.0" sagemaker --upgrade --quiet<jupyter_output><empty_output><jupyter_text>If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it.<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>2. 
Load and prepare the dataset We will use [dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k), an open source dataset of instruction-following records generated by thousands of Databricks employees in several of the behavioral categories outlined in the [InstructGPT paper](https://arxiv.org/abs/2203.02155), including brainstorming, classification, closed QA, generation, information extraction, open QA, and summarization.```python{ "instruction": "What is world of warcraft", "context": "", "response": "World of warcraft is a massive online multi player role playing game. It was released in 2004 by bizarre entertainment"}```To load the `dolly` dataset, we use the `load_dataset()` method from the 🤗 Datasets library.<jupyter_code>from datasets import load_dataset from random import randrange # Load dataset from the hub dataset = load_dataset("databricks/databricks-dolly-15k", split="train") print(f"dataset size: {len(dataset)}") print(dataset[randrange(len(dataset))]) # dataset size: 15011<jupyter_output><empty_output><jupyter_text>To instruction-tune our model, we need to convert our structured examples into a collection of tasks described via instructions. We define a `formatting_function` that takes a sample and returns a string with our format instruction.<jupyter_code>def format_dolly(sample): instruction = f"### Instruction\n{sample['instruction']}" context = f"### Context\n{sample['context']}" if len(sample["context"]) > 0 else None response = f"### Answer\n{sample['response']}" # join all the parts together prompt = "\n\n".join([i for i in [instruction, context, response] if i is not None]) return prompt<jupyter_output><empty_output><jupyter_text>Let's test our formatting function on a random example.<jupyter_code>from random import randrange print(format_dolly(dataset[randrange(len(dataset))]))<jupyter_output>### Instruction Who is the most decorated olympian of all time?
### Answer Michael Phelps is the most decorated olympian winning a total of 28 medals.<jupyter_text>In addition, to formatting our samples we also want to pack multiple samples to one sequence to have a more efficient training.<jupyter_code>from transformers import AutoTokenizer model_id = "tiiuae/falcon-40b" # sharded weights tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token<jupyter_output><empty_output><jupyter_text>We define some helper functions to pack our samples into sequences of a given length and then tokenize them.<jupyter_code>from random import randint from itertools import chain from functools import partial # template dataset to add prompt to each sample def template_dataset(sample): sample["text"] = f"{format_dolly(sample)}{tokenizer.eos_token}" return sample # apply prompt template per sample dataset = dataset.map(template_dataset, remove_columns=list(dataset.features)) # print random sample print(dataset[randint(0, len(dataset))]["text"]) # empty list to save remainder from batches to use in next batch remainder = {"input_ids": [], "attention_mask": [], "token_type_ids": []} def chunk(sample, chunk_length=2048): # define global remainder variable to save remainder from batches to use in next batch global remainder # Concatenate all texts and add remainder from previous batch concatenated_examples = {k: list(chain(*sample[k])) for k in sample.keys()} concatenated_examples = {k: remainder[k] + concatenated_examples[k] for k in concatenated_examples.keys()} # get total number of tokens for batch batch_total_length = len(concatenated_examples[list(sample.keys())[0]]) # get max number of chunks for batch if batch_total_length >= chunk_length: batch_chunk_length = (batch_total_length // chunk_length) * chunk_length # Split by chunks of max_len. result = { k: [t[i : i + chunk_length] for i in range(0, batch_chunk_length, chunk_length)] for k, t in concatenated_examples.items() } # add remainder to global variable for next batch remainder = {k: concatenated_examples[k][batch_chunk_length:] for k in concatenated_examples.keys()} # prepare labels result["labels"] = result["input_ids"].copy() return result # tokenize and chunk dataset lm_dataset = dataset.map( lambda sample: tokenizer(sample["text"]), batched=True, remove_columns=list(dataset.features) ).map( partial(chunk, chunk_length=2048), batched=True, ) # Print total number of samples print(f"Total number of samples: {len(lm_dataset)}")<jupyter_output><empty_output><jupyter_text>After we processed the datasets we are going to use the new [FileSystem integration](https://huggingface.co/docs/datasets/filesystems) to upload our dataset to S3. We are using the `sess.default_bucket()`, adjust this if you want to store the dataset in a different S3 bucket. We will use the S3 path later in our training script.<jupyter_code># save train_dataset to s3 training_input_path = f's3://{sess.default_bucket()}/processed/dolly/train' lm_dataset.save_to_disk(training_input_path) print("uploaded data to:") print(f"training dataset to: {training_input_path}")<jupyter_output><empty_output><jupyter_text>3. Fine-Tune Falcon 40B with QLoRA on Amazon SageMakerWe are going to use the recently introduced method in the paper "[QLoRA: Quantization-aware Low-Rank Adapter Tuning for Language Generation](https://arxiv.org/abs/2106.09685)" by Tim Dettmers et al. 
QLoRA is a new technique to reduce the memory footprint of large language models during finetuning, without sacrificing performance. The TL;DR; of how QLoRA works is: * Quantize the pretrained model to 4 bits and freezing it.* Attach small, trainable adapter layers. (LoRA)* Finetune only the adapter layers, while using the frozen quantized model for context.We prepared a [run_clm.py](./scripts/run_clm.py), which implements QLora using PEFT to train our model. The script also merges the LoRA weights into the model weights after training. That way you can use the model as a normal model without any additional code.In order to create a sagemaker training job we need an `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. The Estimator manages the infrastructure use. SagMaker takes care of starting and managing all the required ec2 instances for us, provides the correct huggingface container, uploads the provided scripts and downloads the data from our S3 bucket into the container at `/opt/ml/input/data`. Then, it starts the training job by running.<jupyter_code>import time # define Training Job Name job_name = f'huggingface-qlora-{time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())}' from sagemaker.huggingface import HuggingFace # hyperparameters, which are passed into the training job hyperparameters ={ 'model_id': model_id, # pre-trained model 'dataset_path': '/opt/ml/input/data/training', # path where sagemaker will save training dataset 'epochs': 3, # number of training epochs 'per_device_train_batch_size': 4, # batch size for training 'lr': 2e-4, # learning rate used during training } # create the Estimator huggingface_estimator = HuggingFace( entry_point = 'run_clm.py', # train script source_dir = 'scripts', # directory which includes all the files needed for training instance_type = 'ml.g5.12xlarge', # instances type used for the training job instance_count = 1, # the number of instances used for training base_job_name = job_name, # the name of the training job role = role, # Iam role used in training job to access AWS ressources, e.g. S3 volume_size = 300, # the size of the EBS volume in GB transformers_version = '4.28', # the transformers version used in the training job pytorch_version = '2.0', # the pytorch_version version used in the training job py_version = 'py310', # the python version used in the training job hyperparameters = hyperparameters, environment = { "HUGGINGFACE_HUB_CACHE": "/tmp/.cache" }, # set env variable to cache models in /tmp )<jupyter_output><empty_output><jupyter_text>We can now start our training job, with the `.fit()` method passing our S3 path to the training script.<jupyter_code># define a data input dictonary with our uploaded s3 uris data = {'training': training_input_path} # starting the train job with our uploaded datasets as input huggingface_estimator.fit(data, wait=True)<jupyter_output><empty_output>
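<jupyter_text>Once the training job has finished, SageMaker packages the merged model weights as a `model.tar.gz` artifact on S3. As a small optional check (not part of the original notebook), we can print the S3 URI of that artifact and the training job name, for example to reuse them later for deployment or to re-attach to the job:<jupyter_code># S3 URI of the trained (merged) model artifact produced by the job above
print(f"model artifact: {huggingface_estimator.model_data}")

# name of the training job, useful for attaching to it again later
print(f"training job name: {huggingface_estimator.latest_training_job.name}")<jupyter_output><empty_output>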
notebooks/sagemaker/28_train_llms_with_qlora/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/28_train_llms_with_qlora/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 3913 }
148
.PHONY: quality style test docs check_dirs := src tests examples docs # Check that source code meets quality standards # this target runs checks on all files quality: black --check $(check_dirs) ruff $(check_dirs) doc-builder style src/peft tests docs/source --max_len 119 --check_only # Format source code automatically and check is there are any problems left that need manual fixing style: black $(check_dirs) ruff $(check_dirs) --fix doc-builder style src/peft tests docs/source --max_len 119 test: python -m pytest -n 3 tests/ $(if $(IS_GITHUB_CI),--report-log "ci_tests.log",) tests_examples_multi_gpu: python -m pytest -m multi_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",) tests_examples_single_gpu: python -m pytest -m single_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",) tests_core_multi_gpu: python -m pytest -m multi_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",) tests_core_single_gpu: python -m pytest -m single_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",) tests_common_gpu: python -m pytest tests/test_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_decoder.log",) python -m pytest tests/test_encoder_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_encoder_decoder.log",) tests_examples_multi_gpu_bnb: python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",) tests_examples_single_gpu_bnb: python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",) tests_core_multi_gpu_bnb: python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",) tests_core_single_gpu_bnb: python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",) # For testing transformers tests for bnb runners transformers_tests: RUN_SLOW=1 python -m pytest transformers-clone/tests/quantization/bnb $(if $(IS_GITHUB_CI),--report-log "transformers_tests.log",) tests_regression: python -m pytest -s --regression tests/regression/ $(if $(IS_GITHUB_CI),--report-log "regression_tests.log",)
peft/Makefile/0
{ "file_path": "peft/Makefile", "repo_id": "peft", "token_count": 905 }
149
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Working with custom models Some fine-tuning techniques, such as prompt tuning, are specific to language models. That means in 🤗 PEFT, it is assumed a 🤗 Transformers model is being used. However, other fine-tuning techniques - like [LoRA](../conceptual_guides/lora) - are not restricted to specific model types. In this guide, we will see how LoRA can be applied to a multilayer perceptron, a computer vision model from the [timm](https://huggingface.co/docs/timm/index) library, or a new 🤗 Transformers architectures. ## Multilayer perceptron Let's assume that we want to fine-tune a multilayer perceptron with LoRA. Here is the definition: ```python from torch import nn class MLP(nn.Module): def __init__(self, num_units_hidden=2000): super().__init__() self.seq = nn.Sequential( nn.Linear(20, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, 2), nn.LogSoftmax(dim=-1), ) def forward(self, X): return self.seq(X) ``` This is a straightforward multilayer perceptron with an input layer, a hidden layer, and an output layer. <Tip> For this toy example, we choose an exceedingly large number of hidden units to highlight the efficiency gains from PEFT, but those gains are in line with more realistic examples. </Tip> There are a few linear layers in this model that could be tuned with LoRA. When working with common 🤗 Transformers models, PEFT will know which layers to apply LoRA to, but in this case, it is up to us as a user to choose the layers. To determine the names of the layers to tune: ```python print([(n, type(m)) for n, m in MLP().named_modules()]) ``` This should print: ``` [('', __main__.MLP), ('seq', torch.nn.modules.container.Sequential), ('seq.0', torch.nn.modules.linear.Linear), ('seq.1', torch.nn.modules.activation.ReLU), ('seq.2', torch.nn.modules.linear.Linear), ('seq.3', torch.nn.modules.activation.ReLU), ('seq.4', torch.nn.modules.linear.Linear), ('seq.5', torch.nn.modules.activation.LogSoftmax)] ``` Let's say we want to apply LoRA to the input layer and to the hidden layer, those are `'seq.0'` and `'seq.2'`. Moreover, let's assume we want to update the output layer without LoRA, that would be `'seq.4'`. The corresponding config would be: ```python from peft import LoraConfig config = LoraConfig( target_modules=["seq.0", "seq.2"], modules_to_save=["seq.4"], ) ``` With that, we can create our PEFT model and check the fraction of parameters trained: ```python from peft import get_peft_model model = MLP() peft_model = get_peft_model(model, config) peft_model.print_trainable_parameters() # prints trainable params: 56,164 || all params: 4,100,164 || trainable%: 1.369798866581922 ``` Finally, we can use any training framework we like, or write our own fit loop, to train the `peft_model`. 
For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb). ## timm models The [timm](https://huggingface.co/docs/timm/index) library contains a large number of pretrained computer vision models. Those can also be fine-tuned with PEFT. Let's check out how this works in practice. To start, ensure that timm is installed in the Python environment: ```bash python -m pip install -U timm ``` Next we load a timm model for an image classification task: ```python import timm num_classes = ... model_id = "timm/poolformer_m36.sail_in1k" model = timm.create_model(model_id, pretrained=True, num_classes=num_classes) ``` Again, we need to make a decision about what layers to apply LoRA to. Since LoRA supports 2D conv layers, and since those are a major building block of this model, we should apply LoRA to the 2D conv layers. To identify the names of those layers, let's look at all the layer names: ```python print([(n, type(m)) for n, m in model.named_modules()]) ``` This will print a very long list, we'll only show the first few: ``` [('', timm.models.metaformer.MetaFormer), ('stem', timm.models.metaformer.Stem), ('stem.conv', torch.nn.modules.conv.Conv2d), ('stem.norm', torch.nn.modules.linear.Identity), ('stages', torch.nn.modules.container.Sequential), ('stages.0', timm.models.metaformer.MetaFormerStage), ('stages.0.downsample', torch.nn.modules.linear.Identity), ('stages.0.blocks', torch.nn.modules.container.Sequential), ('stages.0.blocks.0', timm.models.metaformer.MetaFormerBlock), ('stages.0.blocks.0.norm1', timm.layers.norm.GroupNorm1), ('stages.0.blocks.0.token_mixer', timm.models.metaformer.Pooling), ('stages.0.blocks.0.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d), ('stages.0.blocks.0.drop_path1', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.layer_scale1', timm.models.metaformer.Scale), ('stages.0.blocks.0.res_scale1', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.norm2', timm.layers.norm.GroupNorm1), ('stages.0.blocks.0.mlp', timm.layers.mlp.Mlp), ('stages.0.blocks.0.mlp.fc1', torch.nn.modules.conv.Conv2d), ('stages.0.blocks.0.mlp.act', torch.nn.modules.activation.GELU), ('stages.0.blocks.0.mlp.drop1', torch.nn.modules.dropout.Dropout), ('stages.0.blocks.0.mlp.norm', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.mlp.fc2', torch.nn.modules.conv.Conv2d), ('stages.0.blocks.0.mlp.drop2', torch.nn.modules.dropout.Dropout), ('stages.0.blocks.0.drop_path2', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.layer_scale2', timm.models.metaformer.Scale), ('stages.0.blocks.0.res_scale2', torch.nn.modules.linear.Identity), ('stages.0.blocks.1', timm.models.metaformer.MetaFormerBlock), ('stages.0.blocks.1.norm1', timm.layers.norm.GroupNorm1), ('stages.0.blocks.1.token_mixer', timm.models.metaformer.Pooling), ('stages.0.blocks.1.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d), ... ('head.global_pool.flatten', torch.nn.modules.linear.Identity), ('head.norm', timm.layers.norm.LayerNorm2d), ('head.flatten', torch.nn.modules.flatten.Flatten), ('head.drop', torch.nn.modules.linear.Identity), ('head.fc', torch.nn.modules.linear.Linear)] ] ``` Upon closer inspection, we see that the 2D conv layers have names such as `"stages.0.blocks.0.mlp.fc1"` and `"stages.0.blocks.0.mlp.fc2"`. How can we match those layer names specifically? You can write a [regular expressions](https://docs.python.org/3/library/re.html) to match the layer names. 
For our case, the regex `r".*\.mlp\.fc\d"` should do the job. Furthermore, as in the first example, we should ensure that the output layer, in this case the classification head, is also updated. Looking at the end of the list printed above, we can see that it's named `'head.fc'`. With that in mind, here is our LoRA config: ```python config = LoraConfig(target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"]) ``` Then we only need to create the PEFT model by passing our base model and the config to `get_peft_model`: ```python peft_model = get_peft_model(model, config) peft_model.print_trainable_parameters() # prints trainable params: 1,064,454 || all params: 56,467,974 || trainable%: 1.88505789139876 ``` This shows us that we only need to train less than 2% of all parameters, which is a huge efficiency gain. For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb). ## New transformers architectures When new popular transformers architectures are released, we do our best to quickly add them to PEFT. If you come across a transformers model that is not supported out of the box, don't worry, it will most likely still work if the config is set correctly. Specifically, you have to identify the layers that should be adapted and set them correctly when initializing the corresponding config class, e.g. `LoraConfig`. Here are some tips to help with this. As a first step, it is a good idea is to check the existing models for inspiration. You can find them inside of [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) in the PEFT repository. Often, you'll find a similar architecture that uses the same names. For example, if the new model architecture is a variation of the "mistral" model and you want to apply LoRA, you can see that the entry for "mistral" in `TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING` contains `["q_proj", "v_proj"]`. This tells you that for "mistral" models, the `target_modules` for LoRA should be `["q_proj", "v_proj"]`: ```python from peft import LoraConfig, get_peft_model my_mistral_model = ... config = LoraConfig( target_modules=["q_proj", "v_proj"], ..., # other LoRA arguments ) peft_model = get_peft_model(my_mistral_model, config) ``` If that doesn't help, check the existing modules in your model architecture with the `named_modules` method and try to identify the attention layers, especially the key, query, and value layers. Those will often have names such as `c_attn`, `query`, `q_proj`, etc. The key layer is not always adapted, and ideally, you should check whether including it results in better performance. Additionally, linear layers are common targets to be adapted (e.g. in [QLoRA paper](https://arxiv.org/abs/2305.14314), authors suggest to adapt them as well). Their names will often contain the strings `fc` or `dense`. If you want to add a new model to PEFT, please create an entry in [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) and open a pull request on the [repository](https://github.com/huggingface/peft/pulls). Don't forget to update the [README](https://github.com/huggingface/peft#models-support-matrix) as well. 
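To make the layer-hunting step concrete, here is a small sketch that lists the `nn.Linear` module names of an arbitrary model and filters them for typical attention-projection names. `my_model` and the name patterns are placeholders for illustration; this is not a PEFT API, just plain PyTorch introspection.

```python
from collections import Counter

import torch.nn as nn


def candidate_target_modules(model, patterns=("q_proj", "v_proj", "query", "value", "c_attn")):
    """Collect linear-layer name suffixes that look like attention projections."""
    counts = Counter()
    for name, module in model.named_modules():
        if isinstance(module, nn.Linear):
            suffix = name.rsplit(".", 1)[-1]  # e.g. "model.layers.0.self_attn.q_proj" -> "q_proj"
            if any(pattern in suffix for pattern in patterns):
                counts[suffix] += 1
    return counts


# Example usage with a hypothetical model instance:
# print(candidate_target_modules(my_model))
# -> Counter({'q_proj': 32, 'v_proj': 32}), suggesting target_modules=["q_proj", "v_proj"]
```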
## Checking the result When you think that you have correctly specified the `target_modules` and called `get_peft_model`, you can check the fraction of parameters that will be trainable like this: ```python peft_model.print_trainable_parameters() ``` If this number is too low or too high, check the model `repr` by printing the model. This will show you the names and types of all the layers in the model. Ensure that the intended layers, and only those, are replaced by adapter layers. For instance, for LoRA applied to `nn.Linear` layers, you should see that `lora.Linear` layers are being used. To get a quick overview of all layers that were adapted, you can also use the `targeted_module_names` attribute: ```python print(peft_model.targeted_module_names) ``` This lists the names of each module that was actually adapted.
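Tying this back to the multilayer perceptron from the beginning of this guide, a quick end-to-end check might look like the following sketch; it reuses the `MLP` class and the config defined earlier, and the comments describe the expected (not verbatim) output.

```python
from peft import LoraConfig, get_peft_model

# Reusing the MLP class defined at the start of this guide.
model = MLP()
config = LoraConfig(target_modules=["seq.0", "seq.2"], modules_to_save=["seq.4"])
peft_model = get_peft_model(model, config)

peft_model.print_trainable_parameters()  # fraction of parameters that will be trained
print(peft_model.targeted_module_names)  # expected: ['seq.0', 'seq.2']
print(peft_model)                        # repr should show lora.Linear wrapping seq.0 and seq.2
```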
peft/docs/source/developer_guides/custom_models.md/0
{ "file_path": "peft/docs/source/developer_guides/custom_models.md", "repo_id": "peft", "token_count": 3726 }
150
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LoRA Low-Rank Adaptation ([LoRA](https://huggingface.co/papers/2309.15223)) is a PEFT method that decomposes a large matrix into two smaller low-rank matrices in the attention layers. This drastically reduces the number of parameters that need to be fine-tuned. The abstract from the paper is: *We propose a neural language modeling system based on low-rank adaptation (LoRA) for speech recognition output rescoring. Although pretrained language models (LMs) like BERT have shown superior performance in second-pass rescoring, the high computational cost of scaling up the pretraining stage and adapting the pretrained models to specific domains limit their practical use in rescoring. Here we present a method based on low-rank decomposition to train a rescoring BERT model and adapt it to new domains using only a fraction (0.08%) of the pretrained parameters. These inserted matrices are optimized through a discriminative training objective along with a correlation-based regularization loss. The proposed low-rank adaptation Rescore-BERT (LoRB) architecture is evaluated on LibriSpeech and internal datasets with decreased training times by factors between 5.4 and 3.6.*. ## LoraConfig [[autodoc]] tuners.lora.config.LoraConfig ## LoraModel [[autodoc]] tuners.lora.model.LoraModel
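As a minimal usage sketch to accompany the reference above: the base model id and the target module names below are placeholders, assuming a standard 🤗 Transformers causal LM whose attention projections are called `q_proj` and `v_proj`.

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Placeholder model id; any causal LM with q_proj/v_proj layers would work similarly.
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

config = LoraConfig(
    r=8,                                  # rank of the low-rank update matrices
    lora_alpha=16,                        # scaling factor
    target_modules=["q_proj", "v_proj"],  # attention projections to adapt
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)
peft_model = get_peft_model(base_model, config)
peft_model.print_trainable_parameters()
```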
peft/docs/source/package_reference/lora.md/0
{ "file_path": "peft/docs/source/package_reference/lora.md", "repo_id": "peft", "token_count": 498 }
151
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Semantic segmentation using LoRA This guide demonstrates how to use LoRA, a low-rank approximation technique, to finetune a SegFormer model variant for semantic segmentation. By using LoRA from 🤗 PEFT, we can reduce the number of trainable parameters in the SegFormer model to only 14% of the original trainable parameters. LoRA achieves this reduction by adding low-rank "update matrices" to specific blocks of the model, such as the attention blocks. During fine-tuning, only these matrices are trained, while the original model parameters are left unchanged. At inference time, the update matrices are merged with the original model parameters to produce the final classification result. For more information on LoRA, please refer to the [original LoRA paper](https://arxiv.org/abs/2106.09685). ## Install dependencies Install the libraries required for model training: ```bash !pip install transformers accelerate evaluate datasets peft -q ``` ## Authenticate to share your model To share the finetuned model with the community at the end of the training, authenticate using your 🤗 token. You can obtain your token from your [account settings](https://huggingface.co/settings/token). ```python from huggingface_hub import notebook_login notebook_login() ``` ## Load a dataset To ensure that this example runs within a reasonable time frame, here we are limiting the number of instances from the training set of the [SceneParse150 dataset](https://huggingface.co/datasets/scene_parse_150) to 150. ```python from datasets import load_dataset ds = load_dataset("scene_parse_150", split="train[:150]") ``` Next, split the dataset into train and test sets. ```python ds = ds.train_test_split(test_size=0.1) train_ds = ds["train"] test_ds = ds["test"] ``` ## Prepare label maps Create a dictionary that maps a label id to a label class, which will be useful when setting up the model later: * `label2id`: maps the semantic classes of the dataset to integer ids. * `id2label`: maps integer ids back to the semantic classes. ```python import json from huggingface_hub import cached_download, hf_hub_url repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) ``` ## Prepare datasets for training and evaluation Next, load the SegFormer image processor to prepare the images and annotations for the model. This dataset uses the zero-index as the background class, so make sure to set `do_reduce_labels=True` to subtract one from all labels since the background class is not among the 150 classes. 
```python from transformers import AutoImageProcessor checkpoint = "nvidia/mit-b0" image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True) ``` Add a function to apply data augmentation to the images, so that the model is more robust against overfitting. Here we use the [ColorJitter](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html) function from [torchvision](https://pytorch.org/vision/stable/index.html) to randomly change the color properties of an image. ```python from torchvision.transforms import ColorJitter jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) ``` Add a function to handle grayscale images and ensure that each input image has three color channels, regardless of whether it was originally grayscale or RGB. The function converts RGB images to array as is, and for grayscale images that have only one color channel, the function replicates the same channel three times using `np.tile()` before converting the image into an array. ```python import numpy as np def handle_grayscale_image(image): np_image = np.array(image) if np_image.ndim == 2: tiled_image = np.tile(np.expand_dims(np_image, -1), 3) return Image.fromarray(tiled_image) else: return Image.fromarray(np_image) ``` Finally, combine everything in two functions that you'll use to transform training and validation data. The two functions are similar except data augmentation is applied only to the training data. ```python from PIL import Image def train_transforms(example_batch): images = [jitter(handle_grayscale_image(x)) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = image_processor(images, labels) return inputs def val_transforms(example_batch): images = [handle_grayscale_image(x) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = image_processor(images, labels) return inputs ``` To apply the preprocessing functions over the entire dataset, use the 🤗 Datasets `set_transform` function: ```python train_ds.set_transform(train_transforms) test_ds.set_transform(val_transforms) ``` ## Create evaluation function Including a metric during training is helpful for evaluating your model's performance. You can load an evaluation method with the [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) library. 
For this task, use the [mean Intersection over Union (IoU)](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric): ```python import torch from torch import nn import evaluate metric = evaluate.load("mean_iou") def compute_metrics(eval_pred): with torch.no_grad(): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() # currently using _compute instead of compute # see this issue for more info: https://github.com/huggingface/evaluate/pull/328#issuecomment-1286866576 metrics = metric._compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=image_processor.do_reduce_labels, ) per_category_accuracy = metrics.pop("per_category_accuracy").tolist() per_category_iou = metrics.pop("per_category_iou").tolist() metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) return metrics ``` ## Load a base model Before loading a base model, let's define a helper function to check the total number of parameters a model has, as well as how many of them are trainable. ```python def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}" ) ``` Choose a base model checkpoint. For this example, we use the [SegFormer B0 variant](https://huggingface.co/nvidia/mit-b0). In addition to the checkpoint, pass the `label2id` and `id2label` dictionaries to let the `AutoModelForSemanticSegmentation` class know that we're interested in a custom base model where the decoder head should be randomly initialized using the classes from the custom dataset. ```python from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer model = AutoModelForSemanticSegmentation.from_pretrained( checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True ) print_trainable_parameters(model) ``` At this point you can check with the `print_trainable_parameters` helper function that all 100% parameters in the base model (aka `model`) are trainable. ## Wrap the base model as a PeftModel for LoRA training To leverage the LoRa method, you need to wrap the base model as a `PeftModel`. This involves two steps: 1. Defining LoRa configuration with `LoraConfig` 2. Wrapping the original `model` with `get_peft_model()` using the config defined in the step above. ```python from peft import LoraConfig, get_peft_model config = LoraConfig( r=32, lora_alpha=32, target_modules=["query", "value"], lora_dropout=0.1, bias="lora_only", modules_to_save=["decode_head"], ) lora_model = get_peft_model(model, config) print_trainable_parameters(lora_model) ``` Let's review the `LoraConfig`. To enable LoRA technique, we must define the target modules within `LoraConfig` so that `PeftModel` can update the necessary matrices. 
Specifically, we want to target the `query` and `value` matrices in the attention blocks of the base model. These matrices are identified by their respective names, "query" and "value". Therefore, we should specify these names in the `target_modules` argument of `LoraConfig`. After we wrap our base model `model` with `PeftModel` along with the config, we get a new model where only the LoRA parameters (the so-called "update matrices") are trainable, while the pre-trained parameters are kept frozen. The frozen parameters include those of the randomly initialized classifier, which is NOT what we want when fine-tuning the base model on our custom dataset. Since we do want to train the classifier, we also specify `modules_to_save` in addition to `target_modules`. This ensures that the classifier parameters are trainable, and that they are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Let's review the rest of the parameters: - `r`: The dimension used by the LoRA update matrices. - `lora_alpha`: Scaling factor. - `bias`: Specifies if the `bias` parameters should be trained. `"none"` denotes that none of the `bias` parameters will be trained. When all is configured and the base model is wrapped, the `print_trainable_parameters` helper function lets us explore the number of trainable parameters. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect to see a lower number of trainable parameters in the `lora_model` compared to the original `model`, which is indeed the case here. You can also manually verify which modules are trainable in the `lora_model`. ```python for name, param in lora_model.named_parameters(): if param.requires_grad: print(name, param.shape) ``` This confirms that only the LoRA parameters appended to the attention blocks and the `decode_head` parameters are trainable. ## Train the model Start by defining your training hyperparameters in `TrainingArguments`. You can change the values of most parameters however you prefer. Make sure to set `remove_unused_columns=False`, otherwise the image column will be dropped, and it's required here. The only other required parameter is `output_dir`, which specifies where to save your model. At the end of each epoch, the `Trainer` will evaluate the IoU metric and save the training checkpoint. Note that this example is meant to walk you through the workflow when using PEFT for semantic segmentation. We didn't perform extensive hyperparameter tuning to achieve optimal results.
```python model_name = checkpoint.split("/")[-1] training_args = TrainingArguments( output_dir=f"{model_name}-scene-parse-150-lora", learning_rate=5e-4, num_train_epochs=50, per_device_train_batch_size=4, per_device_eval_batch_size=2, save_total_limit=3, evaluation_strategy="epoch", save_strategy="epoch", logging_steps=5, remove_unused_columns=False, push_to_hub=True, label_names=["labels"], ) ``` Pass the training arguments to `Trainer` along with the model, dataset, and `compute_metrics` function. Call `train()` to finetune your model. ```python trainer = Trainer( model=lora_model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, ) trainer.train() ``` ## Save the model and run inference Use the `save_pretrained()` method of the `lora_model` to save the *LoRA-only parameters* locally. Alternatively, use the `push_to_hub()` method to upload these parameters directly to the Hugging Face Hub (as shown in the [Image classification using LoRA](image_classification_lora) task guide). ```python model_id = "segformer-scene-parse-150-lora" lora_model.save_pretrained(model_id) ``` We can see that the LoRA-only parameters are just **2.2 MB in size**! This greatly improves the portability when using very large models. ```bash !ls -lh {model_id} total 2.2M -rw-r--r-- 1 root root 369 Feb 8 03:09 adapter_config.json -rw-r--r-- 1 root root 2.2M Feb 8 03:09 adapter_model.bin ``` Let's now prepare an `inference_model` and run inference. ```python from peft import PeftConfig config = PeftConfig.from_pretrained(model_id) model = AutoModelForSemanticSegmentation.from_pretrained( checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True ) inference_model = PeftModel.from_pretrained(model, model_id) ``` Get an image: ```python import requests url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" image = Image.open(requests.get(url, stream=True).raw) image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="photo of a room"/> </div> Preprocess the image to prepare for inference. ```python encoding = image_processor(image.convert("RGB"), return_tensors="pt") ``` Run inference with the encoded image. ```python with torch.no_grad(): outputs = inference_model(pixel_values=encoding.pixel_values) logits = outputs.logits upsampled_logits = nn.functional.interpolate( logits, size=image.size[::-1], mode="bilinear", align_corners=False, ) pred_seg = upsampled_logits.argmax(dim=1)[0] ``` Next, visualize the results. We need a color palette for this. Here, we use ade_palette(). As it is a long array, so we don't include it in this guide, please copy it from [the TensorFlow Model Garden repository](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51). 
```python import matplotlib.pyplot as plt color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8) palette = np.array(ade_palette()) for label, color in enumerate(palette): color_seg[pred_seg == label, :] = color color_seg = color_seg[..., ::-1] # convert to BGR img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map img = img.astype(np.uint8) plt.figure(figsize=(15, 10)) plt.imshow(img) plt.show() ``` As you can see, the results are far from perfect. However, this example is designed to illustrate the end-to-end workflow of fine-tuning a semantic segmentation model with the LoRA technique, and is not aiming to achieve state-of-the-art results. The results you see here are the same as you would get if you performed full fine-tuning on the same setup (same model variant, same dataset, same training schedule, etc.), except that LoRA allows you to achieve them with a fraction of the total trainable parameters and in less time. If you wish to use this example and improve the results, here are some things that you can try: * Increase the number of training samples. * Try a larger SegFormer model variant (explore available model variants on the [Hugging Face Hub](https://huggingface.co/models?search=segformer)). * Try different values for the arguments available in `LoraConfig`. * Tune the learning rate and batch size.
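For reference, a self-contained version of the inference setup sketched in this guide might look as follows. It spells out the `PeftModel` import, which the earlier snippet relies on implicitly, and assumes the same `checkpoint`, adapter id, and the `id2label`, `label2id`, and `image` objects built earlier in the guide.

```python
import torch
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation
from peft import PeftConfig, PeftModel

checkpoint = "nvidia/mit-b0"                   # base model used in this guide
adapter_id = "segformer-scene-parse-150-lora"  # local path or Hub id of the saved adapter

peft_config = PeftConfig.from_pretrained(adapter_id)
image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)
base_model = AutoModelForSemanticSegmentation.from_pretrained(
    checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
inference_model = PeftModel.from_pretrained(base_model, adapter_id)
inference_model.eval()

with torch.no_grad():
    encoding = image_processor(image.convert("RGB"), return_tensors="pt")
    logits = inference_model(pixel_values=encoding.pixel_values).logits
```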
peft/docs/source/task_guides/semantic_segmentation_lora.md/0
{ "file_path": "peft/docs/source/task_guides/semantic_segmentation_lora.md", "repo_id": "peft", "token_count": 5536 }
152
<jupyter_start><jupyter_code>from transformers import AutoModelForSeq2SeqLM from peft import PeftModel, PeftConfig import torch from datasets import load_dataset import os from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset dataset_name = "twitter_complaints" text_column = "Tweet text" label_column = "text_label" batch_size = 8 peft_model_id = "smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM" config = PeftConfig.from_pretrained(peft_model_id) peft_model_id = "smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM" max_memory = {0: "6GIB", 1: "0GIB", 2: "0GIB", 3: "0GIB", 4: "0GIB", "cpu": "30GB"} config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, device_map="auto", max_memory=max_memory) model = PeftModel.from_pretrained(model, peft_model_id, device_map="auto", max_memory=max_memory) from datasets import load_dataset dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] print(classes) dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) print(dataset) dataset["train"][0] tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, truncation=True) labels = tokenizer( targets, max_length=target_max_length, padding="max_length", truncation=True, return_tensors="pt" ) labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=True, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["train"] test_dataset = processed_datasets["test"] def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True) test_dataloader = DataLoader(test_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True) model.eval() i = 15 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)) model.eval() eval_preds = [] for _, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to("cuda") for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = model.generate(**batch, max_new_tokens=10) preds = outputs.detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["train"][label_column]): if pred.strip() == 
true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=}") print(f"{eval_preds[:10]=}") print(f"{dataset['train'][label_column][:10]=}") model.eval() test_preds = [] for _, batch in enumerate(tqdm(test_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = model.generate(**batch, max_new_tokens=10) preds = outputs.detach().cpu().numpy() test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) if len(test_preds) > 100: break test_preds<jupyter_output><empty_output>
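The evaluation and test loops above repeat the same generate-and-decode pattern. As a small, optional refactor, the following sketch wraps that pattern in reusable helpers; it assumes the same `model`, `tokenizer`, and dataloaders that the notebook already defines, and keeps the notebook's greedy decoding settings.

```python
import torch
from tqdm import tqdm


def generate_predictions(model, dataloader, tokenizer, max_new_tokens=10, device="cuda"):
    """Run generation over a dataloader and decode the outputs."""
    model.eval()
    predictions = []
    for batch in tqdm(dataloader):
        batch = {k: v.to(device) for k, v in batch.items() if k != "labels"}
        with torch.no_grad():
            outputs = model.generate(**batch, max_new_tokens=max_new_tokens)
        predictions.extend(
            tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)
        )
    return predictions


def accuracy(predictions, references):
    matches = sum(p.strip() == r.strip() for p, r in zip(predictions, references))
    return 100 * matches / len(references)


# eval_preds = generate_predictions(model, eval_dataloader, tokenizer)
# print(accuracy(eval_preds, dataset["train"][label_column]))
```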
peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_big_model_inference.ipynb/0
{ "file_path": "peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_big_model_inference.ipynb", "repo_id": "peft", "token_count": 1695 }
153
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import load_dataset from torch.utils.data import DataLoader, Dataset from transformers import AutoModelForVision2Seq, AutoProcessor from peft import LoraConfig, get_peft_model # Let's define the LoraConfig config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", ) # We load our model and processor using `transformers` model = AutoModelForVision2Seq.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True) processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") # Get our peft model and print the number of trainable parameters model = get_peft_model(model, config) model.print_trainable_parameters() # Let's load the dataset here! dataset = load_dataset("ybelkada/football-dataset", split="train") class ImageCaptioningDataset(Dataset): def __init__(self, dataset, processor): self.dataset = dataset self.processor = processor def __len__(self): return len(self.dataset) def __getitem__(self, idx): item = self.dataset[idx] encoding = self.processor(images=item["image"], padding="max_length", return_tensors="pt") # remove batch dimension encoding = {k: v.squeeze() for k, v in encoding.items()} encoding["text"] = item["text"] return encoding def collator(batch): # pad the input_ids and attention_mask processed_batch = {} for key in batch[0].keys(): if key != "text": processed_batch[key] = torch.stack([example[key] for example in batch]) else: text_inputs = processor.tokenizer( [example["text"] for example in batch], padding=True, return_tensors="pt" ) processed_batch["input_ids"] = text_inputs["input_ids"] processed_batch["attention_mask"] = text_inputs["attention_mask"] return processed_batch train_dataset = ImageCaptioningDataset(dataset, processor) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=collator) optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) device = "cuda" if torch.cuda.is_available() else "cpu" model.train() for epoch in range(50): print("Epoch:", epoch) for idx, batch in enumerate(train_dataloader): input_ids = batch.pop("input_ids").to(device) pixel_values = batch.pop("pixel_values").to(device, torch.float16) outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=input_ids) loss = outputs.loss print("Loss:", loss.item()) loss.backward() optimizer.step() optimizer.zero_grad() if idx % 10 == 0: generated_output = model.generate(pixel_values=pixel_values) print(processor.batch_decode(generated_output, skip_special_tokens=True))
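Once the training loop above has finished, one might want to sanity-check the adapter on a single image. A minimal sketch, assuming the `model`, `processor`, and `device` from the script above are still in memory; the image path is a placeholder.

```python
import torch
from PIL import Image

# Placeholder path; any RGB image will do.
image = Image.open("example.jpg").convert("RGB")

model.eval()
inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)

with torch.no_grad():
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_new_tokens=25)

caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
print(caption)
```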
peft/examples/int8_training/fine_tune_blip2_int8.py/0
{ "file_path": "peft/examples/int8_training/fine_tune_blip2_int8.py", "repo_id": "peft", "token_count": 1293 }
154
<jupyter_start><jupyter_text>Dreambooth with OFTThis Notebook assumes that you already ran the train_dreambooth.py script to create your own adapter.<jupyter_code>from diffusers import DiffusionPipeline from diffusers.utils import check_min_version, get_logger from peft import PeftModel # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) BASE_MODEL_NAME = "stabilityai/stable-diffusion-2-1-base" ADAPTER_MODEL_PATH = "INSERT MODEL PATH HERE" pipe = DiffusionPipeline.from_pretrained( BASE_MODEL_NAME, ) pipe.to("cuda") pipe.unet = PeftModel.from_pretrained(pipe.unet, ADAPTER_MODEL_PATH + "/unet", adapter_name="default") pipe.text_encoder = PeftModel.from_pretrained( pipe.text_encoder, ADAPTER_MODEL_PATH + "/text_encoder", adapter_name="default" ) prompt = "A photo of a sks dog" image = pipe( prompt, num_inference_steps=50, height=512, width=512, ).images[0] image<jupyter_output><empty_output>
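When comparing adapters, it can help to drive the pipeline with explicit seeds so runs are reproducible. A small sketch along the lines of the notebook above; the prompt, seeds, and file names are just examples.

```python
import torch

prompt = "A photo of a sks dog"
for seed in (0, 1, 2):
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe(
        prompt,
        num_inference_steps=50,
        height=512,
        width=512,
        generator=generator,
    ).images[0]
    image.save(f"sks_dog_seed{seed}.png")
```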
peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 376 }
155
[tool.black] line-length = 119 target-version = ['py36'] [tool.ruff] ignore = ["C901", "E501", "E741", "W605"] select = ["C", "E", "F", "I", "W"] line-length = 119 [tool.ruff.isort] lines-after-imports = 2 known-first-party = ["peft"] [isort] default_section = "FIRSTPARTY" known_first_party = "peft" known_third_party = [ "numpy", "torch", "accelerate", "transformers", ] line_length = 119 lines_after_imports = 2 multi_line_output = 3 include_trailing_comma = true force_grid_wrap = 0 use_parentheses = true ensure_newline_before_comments = true [tool.pytest] doctest_optionflags = [ "NORMALIZE_WHITESPACE", "ELLIPSIS", "NUMBER", ] [tool.pytest.ini_options] addopts = "--cov=src/peft --cov-report=term-missing" markers = [ "single_gpu_tests: tests that run on a single GPU", "multi_gpu_tests: tests that run on multiple GPUs", "regression: whether to run regression suite test", "bitsandbytes: select bitsandbytes integration tests" ]
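The markers declared above are what gate the GPU test suites selected by the Makefile targets (e.g. `-m single_gpu_tests`). Purely as an illustration of how such markers are applied, here is a hypothetical test module, not one from the repository.

```python
import pytest
import torch


@pytest.mark.single_gpu_tests
def test_adapter_forward_on_one_gpu():
    assert torch.cuda.device_count() >= 1
    # ... run a forward pass of an adapted model on a single device ...


@pytest.mark.multi_gpu_tests
@pytest.mark.bitsandbytes
def test_8bit_adapter_across_gpus():
    assert torch.cuda.device_count() >= 2
    # ... run a bitsandbytes-quantized model sharded over two devices ...
```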
peft/pyproject.toml/0
{ "file_path": "peft/pyproject.toml", "repo_id": "peft", "token_count": 395 }
156
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .config import AdaLoraConfig from .gptq import SVDQuantLinear from .layer import AdaLoraLayer, RankAllocator, SVDLinear from .model import AdaLoraModel __all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"] def __getattr__(name): if (name == "SVDLinear8bitLt") and is_bnb_available(): from .bnb import SVDLinear8bitLt return SVDLinear8bitLt if (name == "SVDLinear4bit") and is_bnb_4bit_available(): from .bnb import SVDLinear4bit return SVDLinear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
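The `__getattr__` hook above implements module-level lazy imports (PEP 562): the bitsandbytes-backed classes are only imported when they are actually requested and the optional dependency is available. A generic sketch of the same pattern for a hypothetical package with an optional backend (illustrative only, not part of PEFT):

```python
# my_package/__init__.py -- illustrative only.
from importlib import util

__all__ = ["CoreThing"]


class CoreThing:
    """Always available, no optional dependencies."""


def _has_optional_backend() -> bool:
    return util.find_spec("optional_backend") is not None


def __getattr__(name):
    # Only pay the import cost (and require the dependency) when the symbol is used.
    if name == "AcceleratedThing" and _has_optional_backend():
        from ._accelerated import AcceleratedThing

        return AcceleratedThing
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```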
peft/src/peft/tuners/adalora/__init__.py/0
{ "file_path": "peft/src/peft/tuners/adalora/__init__.py", "repo_id": "peft", "token_count": 436 }
157
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod import torch from torch import nn from torch.distributions.relaxed_bernoulli import RelaxedBernoulli from .config import PolyConfig EPS = 1e-12 def get_router(poly_config: PolyConfig) -> nn.Module: if poly_config.poly_type == "poly": return PolyRouter(poly_config) else: raise ValueError( f"Unsupported poly_type: {poly_config.poly_type}. " "Currently, only the following types are supported: " "`poly`." ) class Router(nn.Module, ABC): @abstractmethod def reset(self): ... @abstractmethod def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): ... class PolyRouter(Router): # It's a simplified implementation of # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L138 def __init__(self, poly_config: PolyConfig): super().__init__() self.poly_type = poly_config.poly_type self.n_tasks = poly_config.n_tasks self.n_skills = poly_config.n_skills self.n_splits = poly_config.n_splits self.module_logits = nn.Parameter(torch.empty((self.n_tasks, self.n_splits * self.n_skills))) def reset(self): torch.nn.init.uniform_(self.module_logits, -1e-3, 1e-3) def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): if task_ids is None: raise ValueError("task_ids should not be None.") if task_ids.max().item() >= self.n_tasks: raise ValueError(f"Only {self.n_tasks} tasks available. Found task id = {task_ids.max().item()}") # move task id to input's device task_ids = task_ids.to(self.module_logits.device) module_logits = self.module_logits[task_ids] module_logits = module_logits.view(-1, self.n_splits, self.n_skills) if self.training: module_logits = RelaxedBernoulli(temperature=1.0, logits=module_logits).rsample() else: module_logits = torch.sigmoid(module_logits) module_weights = module_logits / (module_logits.sum(dim=-1, keepdim=True) + EPS) return module_weights
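To see the shapes involved, here is a small sketch that instantiates the router defined above and inspects its output. It assumes the `PolyConfig` and `get_router` from this module are importable as shown; the task, skill, and split counts are arbitrary.

```python
import torch
from peft.tuners.poly.config import PolyConfig
from peft.tuners.poly.router import get_router

config = PolyConfig(n_tasks=4, n_skills=8, n_splits=2)
router = get_router(config)
router.reset()  # initialize module_logits uniformly in [-1e-3, 1e-3]
router.eval()   # use sigmoid instead of a RelaxedBernoulli sample

task_ids = torch.tensor([0, 3, 1])          # one task id per example in the batch
weights = router(task_ids, input_ids=None)  # input_ids is unused by PolyRouter

print(weights.shape)    # torch.Size([3, 2, 8]) -> (batch, n_splits, n_skills)
print(weights.sum(-1))  # ~1.0 per split: weights are normalized over skills
```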
peft/src/peft/tuners/poly/router.py/0
{ "file_path": "peft/src/peft/tuners/poly/router.py", "repo_id": "peft", "token_count": 1124 }
158
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Regression testing: check that checkpoints from previous PEFT versions still return the same values. # # For normal regression testing, just run: # # `pytest tests/regression/test_regression.py -s --regression` # # Add `-s` to show potentially useful debugging information. `--regression` is a custom marker that is required for # regression tests not to be skipped. # # To create new regression tests, run: # `HF_TOKEN=<token> REGRESSION_CREATION_MODE=True pytest tests/regression/test_regression.py -s --regression` # # This will *fail* if: # # 1. the git worktree is dirty # 2. the git commit is not tagged # # Note: A Hugging Face Hub token is required to upload the regression artifacts to our # https://huggingface.co/peft-internal-testing repo. This can be done by anyone with write access to the repo but # apparently it is not possible to create a technical token with write access. # # This is important to ensure that the regression artifacts correspond to a specific released version of PEFT. # Therefore, it is recommended to checkout the tag before running the regression tests, e.g. by running: # # `git checkout v0.1.0` # # To override these checks, run: # ``HF_TOKEN=<token> REGRESSION_CREATION_MODE=True REGRESSION_FORCE_MODE=True pytest tests/regression/test_regression.py -s --regression` # # In REGRESSION_CREATION_MODE, one directory will be created in tests/regression/<TEST_NAME>/<PEFT_VERSION>/ for each # test. This will contain the saved adapter, as well as the output of the test of the model for that version. # # In normal testing mode, the saved adapter and output for each version found in the directory # tests/regression/<TEST_NAME>/ will be loaded and compared to the current output. # # When implementing new tests, check the existing ones as well as the description in the docstring of RegressionTester. import os import shutil import subprocess import sys import tempfile import unittest import pytest import torch from huggingface_hub import snapshot_download, upload_folder from torch import nn from transformers import AutoModelForCausalLM, BitsAndBytesConfig from transformers.pytorch_utils import Conv1D import peft from peft import AdaLoraConfig, IA3Config, LoHaConfig, LoKrConfig, LoraConfig, PeftModel, get_peft_model from peft.utils import infer_device PEFT_VERSION = peft.__version__ REGRESSION_DIR = tempfile.mkdtemp(prefix="peft_regression_") HF_TOKEN = os.environ.get("HF_TOKEN") # the repo has to be created manually once, it is not automatically created HF_REPO = "peft-internal-testing/regression-tests" @pytest.fixture(scope="session", autouse=True) def setup_tearndown(): # Use a pytest session-scoped fixture to setup and teardown exactly once per session. 
AFAICT, unittest does not # provide such a feature # download regression artifacts from Hugging Face Hub at the start snapshot_download( repo_id=HF_REPO, local_dir=REGRESSION_DIR, # Don't use symlink, because this prevents us from properly cleaning up the files once finished local_dir_use_symlinks=False, ) yield # delete regression artifacts at the end of the test session; optionally, upload them first if in creation mode creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False")) if creation_mode: # upload the regression directory to Hugging Face Hub, will overwrite by default upload_folder( repo_id=HF_REPO, folder_path=REGRESSION_DIR, token=HF_TOKEN, ) shutil.rmtree(REGRESSION_DIR) def strtobool(val): """Copied from distutils.util""" val = val.lower() if val in ("y", "yes", "t", "true", "on", "1"): return 1 elif val in ("n", "no", "f", "false", "off", "0"): return 0 else: raise ValueError("invalid truth value {!r}".format(val)) # same as in ..testing_utils.py but cannot be imported def require_torch_gpu(test_case): """ Decorator marking a test that requires a GPU. Will be skipped when no GPU is available. Copies from tsting_utils.py. """ if not torch.cuda.is_available(): return unittest.skip("test requires GPU")(test_case) else: return test_case # same as in ..testing_utils.py but cannot be imported def require_bitsandbytes(test_case): """ Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed. Copies from tsting_utils.py. """ try: import bitsandbytes # noqa: F401 except ImportError: return unittest.skip("test requires bitsandbytes")(test_case) else: return test_case def save_output(output, name, force=False): path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION) filename = os.path.join(path, "output.pt") if os.path.exists(filename) and not force: return if not os.path.exists(path): os.makedirs(path) if os.path.exists(filename) and force: print(f"Overriding existing output in {filename}", file=sys.stderr) torch.save(output, filename) def save_model(model, name, force=False): path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION) filename = os.path.join(path, peft.utils.SAFETENSORS_WEIGHTS_NAME) if os.path.exists(filename) and not force: return if not os.path.exists(path): os.makedirs(path) if os.path.exists(filename) and force: print(f"Overriding existing model in {path}", file=sys.stderr) model.save_pretrained(path) def load_output(name): filename = os.path.join(REGRESSION_DIR, name, "output.pt") return torch.load(filename) @pytest.mark.regression class RegressionTester(unittest.TestCase): """Base class for regression testing Child classes must call assert_results_equal_or_store and pass the model outtput, as well as a unique name that describes the setting (e.g. "lora_opt-350m_bnb_4bit"). They also need to implement get_output(model) to get the model output, and load_base_model(name) to load the base model. Don't forget to fix the seed in load_base_model. 
""" torch_device = infer_device() def setUp(self): self.tol = 1e-4 self.creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False")) self.force_mode = strtobool(os.environ.get("REGRESSION_FORCE_MODE", "False")) if self.force_mode and not self.creation_mode: raise RuntimeError("REGRESSION_FORCE_MODE can only be used together with REGRESSION_CREATION_MODE") if self.creation_mode: self.check_clean_git_status(self.force_mode) if HF_TOKEN is None: raise RuntimeError("HF_TOKEN environment variable must be set in creation mode") def fix_seed(self): torch.manual_seed(0) def check_clean_git_status(self, force): """Ensure that worktree is not dirty and version tag is checked out""" # check that the worktree is clean try: subprocess.check_output(["git", "diff", "--quiet", "HEAD"]) except subprocess.CalledProcessError as exc: if force: print("Overriding despite dirty git worktree", file=sys.stderr) else: raise RuntimeError("Git worktree is dirty") from exc # check that the commit is tagged try: subprocess.check_output(["git", "describe", "--exact-match", "HEAD"]) except subprocess.CalledProcessError as exc: if force: print("Overriding despite non-tagged commit", file=sys.stderr) else: raise RuntimeError("Git commit is not tagged") from exc def assert_results_equal_or_store(self, model, name): """Check if the outputs are the same or save the outputs if in creation mode.""" if not self.creation_mode: # normal regression testing mode self._assert_results_equal(name) else: output = self.get_output(model) if not torch.isfinite(output).all(): raise RuntimeError(f"Model output for {name} is not finite") output2 = self.get_output(model) if not torch.allclose(output, output2): raise RuntimeError(f"Model output for {name} is not deterministic") save_output(output, name, force=self.force_mode) save_model(model, name, force=self.force_mode) def _assert_results_equal(self, name): path = os.path.join(REGRESSION_DIR, name) versions = os.listdir(path) for version in versions: # each directory corresponds to a version output_loaded = load_output(os.path.join(name, version)) base_model = self.load_base_model() model = PeftModel.from_pretrained(base_model, os.path.join(path, version)) output = self.get_output(model) self.assertTrue(torch.allclose(output_loaded, output, atol=self.tol, rtol=self.tol)) def get_output(self, model): raise NotImplementedError def load_base_model(self): raise NotImplementedError ############## # TEST CASES # ############## class TestMlp(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.lin1 = nn.Linear(20, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.lin1(X) X = self.sm(X) return X self.fix_seed() return MLP().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_mlp") def test_adalora(self): base_model = self.load_base_model() config = AdaLoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_mlp") def 
test_ia3(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["lin0"], feedforward_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_mlp") def test_ia3_no_ff(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["lin0"], feedforward_modules=[], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_no_ff_mlp") def test_loha(self): # TODO self.skipTest("Skipping LoHa for now because init is not seedable") base_model = self.load_base_model() config = LoHaConfig( r=8, init_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "loha_mlp") def test_lokr(self): # TODO self.skipTest("Skipping LoKr for now because init is not seedable") base_model = self.load_base_model() config = LoKrConfig( r=8, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lokr_mlp") def test_lora_modules_to_save(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], modules_to_save=["lin1"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_mlp_modules_to_save") class TestLoraEmbConv1D(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class ModelEmbConv1D(nn.Module): def __init__(self): super().__init__() self.emb = nn.Embedding(100, 5) self.conv1d = Conv1D(1, 5) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = self.emb(X) X = self.conv1d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X self.fix_seed() return ModelEmbConv1D().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["emb", "conv1d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_emb_conv1d") class TestLoraConv2D(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class ModelConv2D(nn.Module): def __init__(self): super().__init__() self.conv2d = nn.Conv2d(5, 10, 3) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float().reshape(2, 5, 3, 3) X = self.conv2d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X self.fix_seed() return ModelConv2D().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_conv2d") def test_ia3(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["conv2d"], feedforward_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_conv2d") def test_loha(self): # TODO self.skipTest("Skipping LoHa for now because init is not seedable") base_model = 
self.load_base_model() config = LoHaConfig( r=8, init_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "loha_conv2d") def test_lokr(self): # TODO self.skipTest("Skipping LoKr for now because init is not seedable") base_model = self.load_base_model() config = LoKrConfig( r=8, init_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lokr_conv2d") class TestOpt(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() return AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m") def test_adalora(self): base_model = self.load_base_model() config = AdaLoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m") def test_ia3(self): base_model = self.load_base_model() config = IA3Config(init_ia3_weights=False) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_opt-350m") @require_torch_gpu @require_bitsandbytes class TestOpt8bitBnb(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", load_in_8bit=True, ) return model def test_lora_8bit(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_8bit") def test_adalora(self): # TODO self.skipTest( "Skipping AdaLora for now, getting TypeError: unsupported operand type(s) for +=: 'dict' and 'Tensor'" ) base_model = self.load_base_model() config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m_8bit") @require_torch_gpu @require_bitsandbytes class TestOpt4bitBnb(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, bnb_4bit_compute_type=torch.float32, ) model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", quantization_config=bnb_config, torch_dtype=torch.float32, ) return model def test_lora_4bit(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_4bit") def test_adalora(self): # TODO self.skipTest("Skipping AdaLora for now because of a bug, see #1113") base_model = self.load_base_model() config = AdaLoraConfig( init_r=6, target_r=4, 
tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m_4bit")
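For context on the `REGRESSION_CREATION_MODE` / `REGRESSION_FORCE_MODE` / `HF_TOKEN` switches read in the fixture and in `RegressionTester.setUp` above, here is a rough sketch of how a creation run might be driven. The pytest invocation and path are assumptions based on the `pytest.mark.regression` marker and this file's location; they are not defined by the file itself.

```python
# Sketch only: drive a "creation mode" run of the regression suite.
# The environment variable names come from the code above; the pytest command is an assumption.
import os

os.environ["REGRESSION_CREATION_MODE"] = "true"          # write new reference outputs/models
os.environ["REGRESSION_FORCE_MODE"] = "false"            # only meaningful together with creation mode
os.environ["HF_TOKEN"] = "<hf token with write access>"  # required by setUp in creation mode

# then, from the repo root (hypothetical invocation):
#   pytest -m regression tests/regression/test_regression.py
```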
peft/tests/regression/test_regression.py/0
{ "file_path": "peft/tests/regression/test_regression.py", "repo_id": "peft", "token_count": 9657 }
159
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from peft import PeftModel, PolyConfig, TaskType, get_peft_model class TestPoly(unittest.TestCase): def test_poly(self): torch.manual_seed(0) model_name_or_path = "google/flan-t5-small" atol, rtol = 1e-6, 1e-6 r = 8 # rank of lora in poly n_tasks = 3 # number of tasks n_skills = 2 # number of skills (loras) n_splits = 4 # number of heads lr = 1e-2 num_epochs = 10 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) peft_config = PolyConfig( task_type=TaskType.SEQ_2_SEQ_LM, poly_type="poly", r=r, n_tasks=n_tasks, n_skills=n_skills, n_splits=n_splits, ) model = get_peft_model(base_model, peft_config) # generate some dummy data text = os.__doc__.splitlines() self.assertTrue(len(text) > 10) inputs = tokenizer(text, return_tensors="pt", padding=True) inputs["task_ids"] = torch.arange(len(text)) % n_tasks inputs["labels"] = tokenizer((["A", "B"] * 100)[: len(text)], return_tensors="pt")["input_ids"] # simple training loop model.train() optimizer = torch.optim.Adam(model.parameters(), lr=lr) losses = [] for _ in range(num_epochs): outputs = model(**inputs) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() losses.append(loss.item()) # loss improved by at least 50% self.assertLess(losses[-1], 0.5 * losses[0]) # check that saving and loading works torch.manual_seed(0) model.eval() logits_before = model(**inputs).logits tokens_before = model.generate(**inputs) with model.disable_adapter(): logits_disabled = model(**inputs).logits tokens_disabled = model.generate(**inputs) self.assertFalse(torch.allclose(logits_before, logits_disabled, atol=atol, rtol=rtol)) self.assertFalse(torch.allclose(tokens_before, tokens_disabled, atol=atol, rtol=rtol)) # saving and loading with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) loaded = PeftModel.from_pretrained(base_model, tmp_dir) torch.manual_seed(0) output_after = loaded(**inputs).logits tokens_after = loaded.generate(**inputs) self.assertTrue(torch.allclose(logits_before, output_after, atol=atol, rtol=rtol)) self.assertTrue(torch.allclose(tokens_before, tokens_after, atol=atol, rtol=rtol))
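The test above doubles as a usage recipe for Poly. Condensed to the inference-time essentials (same model and hyper-parameters as the test, with a made-up input string), the detail worth calling out is that every `forward`/`generate` call needs a `task_ids` tensor in addition to the usual tokenizer outputs:

```python
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PolyConfig, TaskType, get_peft_model

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

peft_config = PolyConfig(
    task_type=TaskType.SEQ_2_SEQ_LM, poly_type="poly",
    r=8, n_tasks=3, n_skills=2, n_splits=4,
)
model = get_peft_model(base_model, peft_config)

inputs = tokenizer(["some input text"], return_tensors="pt")
inputs["task_ids"] = torch.tensor([0])  # which task's routing weights to use
tokens = model.generate(**inputs)
print(tokenizer.decode(tokens[0], skip_special_tokens=True))
```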
peft/tests/test_poly.py/0
{ "file_path": "peft/tests/test_poly.py", "repo_id": "peft", "token_count": 1567 }
160
import argparse import hashlib import os import mxnet as mx import gluoncv import torch from timm import create_model parser = argparse.ArgumentParser(description='Convert from MXNet') parser.add_argument('--model', default='all', type=str, metavar='MODEL', help='Name of model to train (default: "all"') def convert(mxnet_name, torch_name): # download and load the pre-trained model net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True) # create corresponding torch model torch_net = create_model(torch_name) mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k] torchp = list(torch_net.named_parameters()) torch_params = {} # convert parameters # NOTE: we are relying on the fact that the order of parameters # are usually exactly the same between these models, thus no key name mapping # is necessary. Asserts will trip if this is not the case. for (tn, tv), (mn, mv) in zip(torchp, mxp): m_split = mn.split('_') t_split = tn.split('.') print(t_split, m_split) print(tv.shape, mv.shape) # ensure ordering of BN params match since their sizes are not specific if m_split[-1] == 'gamma': assert t_split[-1] == 'weight' if m_split[-1] == 'beta': assert t_split[-1] == 'bias' # ensure shapes match assert all(t == m for t, m in zip(tv.shape, mv.shape)) torch_tensor = torch.from_numpy(mv.data().asnumpy()) torch_params[tn] = torch_tensor # convert buffers (batch norm running stats) mxb = [(k, v) for k, v in net.collect_params().items() if any(x in k for x in ['running_mean', 'running_var'])] torchb = [(k, v) for k, v in torch_net.named_buffers() if 'num_batches' not in k] for (tn, tv), (mn, mv) in zip(torchb, mxb): print(tn, mn) print(tv.shape, mv.shape) # ensure ordering of BN params match since their sizes are not specific if 'running_var' in tn: assert 'running_var' in mn if 'running_mean' in tn: assert 'running_mean' in mn torch_tensor = torch.from_numpy(mv.data().asnumpy()) torch_params[tn] = torch_tensor torch_net.load_state_dict(torch_params) torch_filename = './%s.pth' % torch_name torch.save(torch_net.state_dict(), torch_filename) with open(torch_filename, 'rb') as f: sha_hash = hashlib.sha256(f.read()).hexdigest() final_filename = os.path.splitext(torch_filename)[0] + '-' + sha_hash[:8] + '.pth' os.rename(torch_filename, final_filename) print("=> Saved converted model to '{}, SHA256: {}'".format(final_filename, sha_hash)) def map_mx_to_torch_model(mx_name): torch_name = mx_name.lower() if torch_name.startswith('se_'): torch_name = torch_name.replace('se_', 'se') elif torch_name.startswith('senet_'): torch_name = torch_name.replace('senet_', 'senet') elif torch_name.startswith('inceptionv3'): torch_name = torch_name.replace('inceptionv3', 'inception_v3') torch_name = 'gluon_' + torch_name return torch_name ALL = ['resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 'resnet101_v1b', 'resnet152_v1b', 'resnet50_v1c', 'resnet101_v1c', 'resnet152_v1c', 'resnet50_v1d', 'resnet101_v1d', 'resnet152_v1d', #'resnet50_v1e', 'resnet101_v1e', 'resnet152_v1e', 'resnet50_v1s', 'resnet101_v1s', 'resnet152_v1s', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnext101_64x4d', 'senet_154', 'inceptionv3'] def main(): args = parser.parse_args() if not args.model or args.model == 'all': for mx_model in ALL: torch_model = map_mx_to_torch_model(mx_model) convert(mx_model, torch_model) else: mx_model = args.model torch_model = map_mx_to_torch_model(mx_model) convert(mx_model, torch_model) if __name__ == '__main__': main()
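As a quick illustration of the helpers above: the script is normally run as `python convert_from_mxnet.py --model <name>` with a name from `ALL`, but the same two functions can be called directly. The import path below is an assumption (running from the `convert/` directory with mxnet, gluoncv and timm installed).

```python
# Sketch: convert a single Gluon model using the helpers defined above.
from convert_from_mxnet import convert, map_mx_to_torch_model

mx_name = 'se_resnext50_32x4d'                 # one of the entries in ALL
torch_name = map_mx_to_torch_model(mx_name)
print(torch_name)                              # 'gluon_seresnext50_32x4d'

# Downloads the Gluon weights, copies them into the matching timm model and
# writes ./gluon_seresnext50_32x4d-<sha256 prefix>.pth next to the script.
convert(mx_name, torch_name)
```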
pytorch-image-models/convert/convert_from_mxnet.py/0
{ "file_path": "pytorch-image-models/convert/convert_from_mxnet.py", "repo_id": "pytorch-image-models", "token_count": 1786 }
161
# CSP-ResNet **CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{wang2019cspnet, title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, year={2019}, eprint={1911.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP ResNet Paper: Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance Models: - Name: cspresnet50 In Collection: CSP ResNet Metadata: FLOPs: 5924992000 Parameters: 21620000 File Size: 86679303 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Label Smoothing - Polynomial Learning Rate Decay - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: cspresnet50 LR: 0.1 Layers: 50 Crop Pct: '0.887' Momentum: 0.9 Batch Size: 128 Image Size: '256' Weight Decay: 0.005 Interpolation: bilinear Training Steps: 8000000 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L415 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.57% Top 5 Accuracy: 94.71% -->
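The `{% include 'code_snippets.md' %}` placeholder above expands to the shared usage snippets in the rendered docs. As a rough stand-in, a minimal sketch assuming a `timm` install with the `cspresnet50` weights listed in the card:

```python
import timm
import torch

model = timm.create_model('cspresnet50', pretrained=True)
model.eval()

x = torch.randn(1, 3, 256, 256)  # the card lists a 256x256 image size
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])
```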
pytorch-image-models/docs/models/.templates/models/csp-resnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/csp-resnet.md", "repo_id": "pytorch-image-models", "token_count": 897 }
162
# (Gluon) Xception **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) layers. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{chollet2017xception, title={Xception: Deep Learning with Depthwise Separable Convolutions}, author={François Chollet}, year={2017}, eprint={1610.02357}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Gloun Xception Paper: Title: 'Xception: Deep Learning with Depthwise Separable Convolutions' URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise Models: - Name: gluon_xception65 In Collection: Gloun Xception Metadata: FLOPs: 17594889728 Parameters: 39920000 File Size: 160551306 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Depthwise Separable Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_xception65 Crop Pct: '0.903' Image Size: '299' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_xception.py#L241 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.7% Top 5 Accuracy: 94.87% -->
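As with the other cards, the include placeholder carries the usage snippets in the rendered docs. A minimal sketch for this model; the `features_only` flag is standard `timm` usage and is assumed to be supported for this architecture:

```python
import timm
import torch

# Classification head
model = timm.create_model('gluon_xception65', pretrained=True)
model.eval()

# Intermediate feature maps instead of classification logits
backbone = timm.create_model('gluon_xception65', pretrained=True, features_only=True)
x = torch.randn(1, 3, 299, 299)  # the card lists a 299x299 input
with torch.no_grad():
    for fmap in backbone(x):
        print(fmap.shape)
```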
pytorch-image-models/docs/models/.templates/models/gloun-xception.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/gloun-xception.md", "repo_id": "pytorch-image-models", "token_count": 747 }
163
# RegNetX **RegNetX** is a convolutional network design space with simple, regular models with parameters: depth $d$, initial width $w\_{0} > 0$, and slope $w\_{a} > 0$, and generates a different block width $u\_{j}$ for each block $j < d$. The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): $$ u\_{j} = w\_{0} + w\_{a}\cdot{j} $$ For **RegNetX** we have additional restrictions: we set $b = 1$ (the bottleneck ratio), $12 \leq d \leq 28$, and $w\_{m} \geq 2$ (the width multiplier). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{radosavovic2020designing, title={Designing Network Design Spaces}, author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, year={2020}, eprint={2003.13678}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RegNetX Paper: Title: Designing Network Design Spaces URL: https://paperswithcode.com/paper/designing-network-design-spaces Models: - Name: regnetx_002 In Collection: RegNetX Metadata: FLOPs: 255276032 Parameters: 2680000 File Size: 10862199 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_002 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L337 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 68.75% Top 5 Accuracy: 88.56% - Name: regnetx_004 In Collection: RegNetX Metadata: FLOPs: 510619136 Parameters: 5160000 File Size: 20841309 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_004 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L343 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.39% Top 5 Accuracy: 90.82% - Name: regnetx_006 In Collection: RegNetX Metadata: FLOPs: 771659136 Parameters: 6200000 File Size: 24965172 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_006 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: 
'224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.84% Top 5 Accuracy: 91.68% - Name: regnetx_008 In Collection: RegNetX Metadata: FLOPs: 1027038208 Parameters: 7260000 File Size: 29235944 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_008 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L355 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.05% Top 5 Accuracy: 92.34% - Name: regnetx_016 In Collection: RegNetX Metadata: FLOPs: 2059337856 Parameters: 9190000 File Size: 36988158 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_016 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L361 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.95% Top 5 Accuracy: 93.43% - Name: regnetx_032 In Collection: RegNetX Metadata: FLOPs: 4082555904 Parameters: 15300000 File Size: 61509573 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_032 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L367 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.15% Top 5 Accuracy: 94.09% - Name: regnetx_040 In Collection: RegNetX Metadata: FLOPs: 5095167744 Parameters: 22120000 File Size: 88844824 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_040 Epochs: 100 Crop Pct: '0.875' 
Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L373 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.48% Top 5 Accuracy: 94.25% - Name: regnetx_064 In Collection: RegNetX Metadata: FLOPs: 8303405824 Parameters: 26210000 File Size: 105184854 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_064 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L379 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.06% Top 5 Accuracy: 94.47% - Name: regnetx_080 In Collection: RegNetX Metadata: FLOPs: 10276726784 Parameters: 39570000 File Size: 158720042 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_080 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L385 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.21% Top 5 Accuracy: 94.55% - Name: regnetx_120 In Collection: RegNetX Metadata: FLOPs: 15536378368 Parameters: 46110000 File Size: 184866342 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_120 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L391 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.61% Top 5 Accuracy: 94.73% - Name: regnetx_160 In Collection: RegNetX Metadata: FLOPs: 20491740672 Parameters: 54280000 File Size: 217623862 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 
GPUs ID: regnetx_160 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L397 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.84% Top 5 Accuracy: 94.82% - Name: regnetx_320 In Collection: RegNetX Metadata: FLOPs: 40798958592 Parameters: 107810000 File Size: 431962133 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_320 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.25% Top 5 Accuracy: 95.03% -->
pytorch-image-models/docs/models/.templates/models/regnetx.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/regnetx.md", "repo_id": "pytorch-image-models", "token_count": 5745 }
164
# SSL ResNeXT A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I. Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SSL ResNext Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: ssl_resnext101_32x16d In Collection: SSL ResNext Metadata: FLOPs: 46623691776 Parameters: 194030000 File Size: 777518664 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext101_32x16d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L944 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.84% Top 5 Accuracy: 96.09% - Name: ssl_resnext101_32x4d In Collection: SSL ResNext Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177341913 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext101_32x4d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L924 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth Results: - 
Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.91% Top 5 Accuracy: 95.73% - Name: ssl_resnext101_32x8d In Collection: SSL ResNext Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356056638 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext101_32x8d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L934 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.61% Top 5 Accuracy: 96.04% - Name: ssl_resnext50_32x4d In Collection: SSL ResNext Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100428550 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnext50_32x4d LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L914 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.3% Top 5 Accuracy: 95.41% -->
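A short usage sketch for the checkpoints listed in this card; the model names are the `ID` fields above, and `list_models` simply confirms which variants the installed `timm` exposes:

```python
import timm

# Which SSL ResNeXt variants does this timm install know about?
print(timm.list_models('ssl_resnext*'))

model = timm.create_model('ssl_resnext50_32x4d', pretrained=True)
model.eval()
```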
pytorch-image-models/docs/models/.templates/models/ssl-resnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/ssl-resnext.md", "repo_id": "pytorch-image-models", "token_count": 2623 }
165
site_name: 'Pytorch Image Models' site_description: 'Pretrained Image Recognition Models' repo_name: 'rwightman/pytorch-image-models' repo_url: 'https://github.com/rwightman/pytorch-image-models' nav: - index.md - models.md - ... | models/*.md - results.md - scripts.md - training_hparam_examples.md - feature_extraction.md - changes.md - archived_changes.md theme: name: 'material' feature: tabs: false extra_javascript: - 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML' - https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js - javascripts/tables.js markdown_extensions: - codehilite: linenums: true - admonition - pymdownx.arithmatex - pymdownx.betterem: smart_enable: all - pymdownx.caret - pymdownx.critic - pymdownx.details - pymdownx.emoji: emoji_generator: !!python/name:pymdownx.emoji.to_svg - pymdownx.inlinehilite - pymdownx.magiclink - pymdownx.mark - pymdownx.smartsymbols - pymdownx.superfences - pymdownx.tasklist: custom_checkbox: true - pymdownx.tilde - mdx_truly_sane_lists plugins: - search - awesome-pages - redirects: redirect_maps: 'index.md': 'https://huggingface.co/docs/timm/index' 'models.md': 'https://huggingface.co/docs/timm/models' 'results.md': 'https://huggingface.co/docs/timm/results' 'scripts.md': 'https://huggingface.co/docs/timm/training_script' 'training_hparam_examples.md': 'https://huggingface.co/docs/timm/training_script#training-examples' 'feature_extraction.md': 'https://huggingface.co/docs/timm/feature_extraction'
pytorch-image-models/mkdocs.yml/0
{ "file_path": "pytorch-image-models/mkdocs.yml", "repo_id": "pytorch-image-models", "token_count": 727 }
166
""" Setup """ from setuptools import setup, find_packages from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() exec(open('timm/version.py').read()) setup( name='timm', version=__version__, description='PyTorch Image Models', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/huggingface/pytorch-image-models', author='Ross Wightman', author_email='[email protected]', classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 4 - Beta', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], # Note that this is a string of words separated by whitespace, not a list. keywords='pytorch pretrained models efficientnet mobilenetv3 mnasnet resnet vision transformer vit', packages=find_packages(exclude=['convert', 'tests', 'results']), include_package_data=True, install_requires=['torch >= 1.7', 'torchvision', 'pyyaml', 'huggingface_hub', 'safetensors'], python_requires='>=3.7', )
pytorch-image-models/setup.py/0
{ "file_path": "pytorch-image-models/setup.py", "repo_id": "pytorch-image-models", "token_count": 675 }
167
from abc import ABC, abstractmethod from typing import Dict, List, Optional, Union class DatasetInfo(ABC): def __init__(self): pass @abstractmethod def num_classes(self): pass @abstractmethod def label_names(self): pass @abstractmethod def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: pass @abstractmethod def index_to_label_name(self, index) -> str: pass @abstractmethod def index_to_description(self, index: int, detailed: bool = False) -> str: pass @abstractmethod def label_name_to_description(self, label: str, detailed: bool = False) -> str: pass class CustomDatasetInfo(DatasetInfo): """ DatasetInfo that wraps passed values for custom datasets.""" def __init__( self, label_names: Union[List[str], Dict[int, str]], label_descriptions: Optional[Dict[str, str]] = None ): super().__init__() assert len(label_names) > 0 self._label_names = label_names # label index => label name mapping self._label_descriptions = label_descriptions # label name => label description mapping if self._label_descriptions is not None: # validate descriptions (label names required) assert isinstance(self._label_descriptions, dict) for n in self._label_names: assert n in self._label_descriptions def num_classes(self): return len(self._label_names) def label_names(self): return self._label_names def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: return self._label_descriptions def label_name_to_description(self, label: str, detailed: bool = False) -> str: if self._label_descriptions: return self._label_descriptions[label] return label # return label name itself if a description is not present def index_to_label_name(self, index) -> str: assert 0 <= index < len(self._label_names) return self._label_names[index] def index_to_description(self, index: int, detailed: bool = False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed)
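A minimal usage sketch for `CustomDatasetInfo`; the labels below are made up for illustration:

```python
info = CustomDatasetInfo(
    label_names=['cat', 'dog', 'bird'],
    label_descriptions={'cat': 'domestic cat', 'dog': 'domestic dog', 'bird': 'small songbird'},
)
print(info.num_classes())                      # 3
print(info.index_to_label_name(1))             # 'dog'
print(info.index_to_description(1))            # 'domestic dog'
print(info.label_name_to_description('cat'))   # 'domestic cat'
```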
pytorch-image-models/timm/data/dataset_info.py/0
{ "file_path": "pytorch-image-models/timm/data/dataset_info.py", "repo_id": "pytorch-image-models", "token_count": 941 }
168
""" Dataset reader that wraps TFDS datasets Wraps many (most?) TFDS image-classification datasets from https://github.com/tensorflow/datasets https://www.tensorflow.org/datasets/catalog/overview#image_classification Hacked together by / Copyright 2020 Ross Wightman """ import math import os import sys from typing import Optional import torch import torch.distributed as dist from PIL import Image try: import tensorflow as tf tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu) import tensorflow_datasets as tfds try: tfds.even_splits('', 1, drop_remainder=False) # non-buggy even_splits has drop_remainder arg has_buggy_even_splits = False except TypeError: print("Warning: This version of tfds doesn't have the latest even_splits impl. " "Please update or use tfds-nightly for better fine-grained split behaviour.") has_buggy_even_splits = True # NOTE uncomment below if having file limit issues on dataset build (or alter your OS defaults) # import resource # low, high = resource.getrlimit(resource.RLIMIT_NOFILE) # resource.setrlimit(resource.RLIMIT_NOFILE, (high, high)) except ImportError as e: print(e) print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.") raise e from .class_map import load_class_map from .reader import Reader from .shared_count import SharedCount MAX_TP_SIZE = int(os.environ.get('TFDS_TP_SIZE', 8)) # maximum TF threadpool size, for jpeg decodes and queuing activities SHUFFLE_SIZE = int(os.environ.get('TFDS_SHUFFLE_SIZE', 8192)) # samples to shuffle in DS queue PREFETCH_SIZE = int(os.environ.get('TFDS_PREFETCH_SIZE', 2048)) # samples to prefetch @tfds.decode.make_decoder() def decode_example(serialized_image, feature, dct_method='INTEGER_ACCURATE', channels=3): return tf.image.decode_jpeg( serialized_image, channels=channels, dct_method=dct_method, ) def even_split_indices(split, n, num_samples): partitions = [round(i * num_samples / n) for i in range(n + 1)] return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)] def get_class_labels(info): if 'label' not in info.features: return {} class_label = info.features['label'] class_to_idx = {n: class_label.str2int(n) for n in class_label.names} return class_to_idx class ReaderTfds(Reader): """ Wrap Tensorflow Datasets for use in PyTorch There several things to be aware of: * To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last https://github.com/pytorch/pytorch/issues/33413 * With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch from each worker could be a different size. For training this is worked around by option above, for validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced across replicas are of same size. This will slightly alter the results, distributed validation will not be 100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse since there are up to N * J extra samples with IterableDatasets. * The sharding (splitting of dataset into TFRecord) files imposes limitations on the number of replicas and dataloader workers you can use. For really small datasets that only contain a few shards you may have to train non-distributed w/ 1-2 dataloader workers. 
This is likely not a huge concern as the benefit of distributed training or fast dataloading should be much less for small datasets. * This wrapper is currently configured to return individual, decompressed image samples from the TFDS dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream components. """ def __init__( self, name, root=None, split='train', class_map=None, is_training=False, batch_size=1, download=False, repeats=0, seed=42, input_key='image', input_img_mode='RGB', target_key='label', target_img_mode='', prefetch_size=None, shuffle_size=None, max_threadpool_size=None ): """ Tensorflow-datasets Wrapper Args: root: root data dir (ie your TFDS_DATA_DIR. not dataset specific sub-dir) name: tfds dataset name (eg `imagenet2012`) split: tfds dataset split (can use all TFDS split strings eg `train[:10%]`) is_training: training mode, shuffle enabled, dataset len rounded by batch_size batch_size: batch_size to use to unsure total samples % batch_size == 0 in training across all dis nodes download: download and build TFDS dataset if set, otherwise must use tfds CLI repeats: iterate through (repeat) the dataset this many times per iteration (once if 0 or 1) seed: common seed for shard shuffle across all distributed/worker instances input_key: name of Feature to return as data (input) input_img_mode: image mode if input is an image (currently PIL mode string) target_key: name of Feature to return as target (label) target_img_mode: image mode if target is an image (currently PIL mode string) prefetch_size: override default tf.data prefetch buffer size shuffle_size: override default tf.data shuffle buffer size max_threadpool_size: override default threadpool size for tf.data """ super().__init__() self.root = root self.split = split self.is_training = is_training self.batch_size = batch_size self.repeats = repeats self.common_seed = seed # a seed that's fixed across all worker / distributed instances # performance settings self.prefetch_size = prefetch_size or PREFETCH_SIZE self.shuffle_size = shuffle_size or SHUFFLE_SIZE self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE # TFDS builder and split information self.input_key = input_key # FIXME support tuples / lists of inputs and targets and full range of Feature self.input_img_mode = input_img_mode self.target_key = target_key self.target_img_mode = target_img_mode # for dense pixel targets self.builder = tfds.builder(name, data_dir=root) # NOTE: the tfds command line app can be used download & prepare datasets if you don't enable download flag if download: self.builder.download_and_prepare() self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = get_class_labels(self.builder.info) if self.target_key == 'label' else {} self.split_info = self.builder.info.splits[split] self.num_samples = self.split_info.num_examples # Distributed world state self.dist_rank = 0 self.dist_num_replicas = 1 if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1: self.dist_rank = dist.get_rank() self.dist_num_replicas = dist.get_world_size() # Attributes that are updated in _lazy_init, including the tf.data pipeline itself self.global_num_workers = 1 self.num_workers = 1 self.worker_info = None self.worker_seed = 0 # seed unique to each work instance self.subsplit = None # set when data is distributed across 
workers using sub-splits self.ds = None # initialized lazily on each dataloader worker process self.init_count = 0 # number of ds TF data pipeline initializations self.epoch_count = SharedCount() # FIXME need to determine if reinit_each_iter is necessary. I'm don't completely trust behaviour # of `shuffle_reshuffle_each_iteration` when there are multiple workers / nodes across epochs self.reinit_each_iter = self.is_training def set_epoch(self, count): self.epoch_count.value = count def set_loader_cfg( self, num_workers: Optional[int] = None, ): if self.ds is not None: return if num_workers is not None: self.num_workers = num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers def _lazy_init(self): """ Lazily initialize the dataset. This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that will be using the dataset instance. The __init__ method is called on the main process, this will be called in a dataloader worker process. NOTE: There will be problems if you try to re-use this dataset across different loader/worker instances once it has been initialized. Do not call any dataset methods that can call _lazy_init before it is passed to dataloader. """ worker_info = torch.utils.data.get_worker_info() # setup input context to split dataset across distributed processes num_workers = 1 global_worker_id = 0 if worker_info is not None: self.worker_info = worker_info self.worker_seed = worker_info.seed self.num_workers = worker_info.num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers global_worker_id = self.dist_rank * self.num_workers + worker_info.id """ Data sharding InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used. My understanding is that using split, the underling TFRecord files will shuffle (shuffle_files=True) between the splits each iteration, but that understanding could be wrong. I am currently using a mix of InputContext shard assignment and fine-grained sub-splits for distributing the data across workers. For training InputContext is used to assign shards to nodes unless num_shards in dataset < total number of workers. Otherwise sub-split API is used for datasets without enough shards or for validation where we can't drop samples and need to avoid minimize uneven splits to avoid padding. """ should_subsplit = self.global_num_workers > 1 and ( self.split_info.num_shards < self.global_num_workers or not self.is_training) if should_subsplit: # split the dataset w/o using sharding for more even samples / worker, can result in less optimal # read patterns for distributed training (overlap across shards) so better to use InputContext there if has_buggy_even_splits: # my even_split workaround doesn't work on subsplits, upgrade tfds! if not isinstance(self.split_info, tfds.core.splits.SubSplitInfo): subsplits = even_split_indices(self.split, self.global_num_workers, self.num_samples) self.subsplit = subsplits[global_worker_id] else: subsplits = tfds.even_splits(self.split, self.global_num_workers) self.subsplit = subsplits[global_worker_id] input_context = None if self.global_num_workers > 1 and self.subsplit is None: # set input context to divide shards among distributed replicas input_context = tf.distribute.InputContext( num_input_pipelines=self.global_num_workers, input_pipeline_id=global_worker_id, num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact? 
) read_config = tfds.ReadConfig( shuffle_seed=self.common_seed + self.epoch_count.value, shuffle_reshuffle_each_iteration=True, input_context=input_context, ) ds = self.builder.as_dataset( split=self.subsplit or self.split, shuffle_files=self.is_training, decoders=dict(image=decode_example(channels=1 if self.input_img_mode == 'L' else 3)), read_config=read_config, ) # avoid overloading threading w/ combo of TF ds threads + PyTorch workers options = tf.data.Options() thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading' getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // self.num_workers) getattr(options, thread_member).max_intra_op_parallelism = 1 ds = ds.with_options(options) if self.is_training or self.repeats > 1: # to prevent excessive drop_last batch behaviour w/ IterableDatasets # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading ds = ds.repeat() # allow wrap around and break iteration manually if self.is_training: ds = ds.shuffle(min(self.num_samples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed) ds = ds.prefetch(min(self.num_samples // self.global_num_workers, self.prefetch_size)) self.ds = tfds.as_numpy(ds) self.init_count += 1 def _num_samples_per_worker(self): num_worker_samples = \ max(1, self.repeats) * self.num_samples / max(self.global_num_workers, self.dist_num_replicas) if self.is_training or self.dist_num_replicas > 1: num_worker_samples = math.ceil(num_worker_samples) if self.is_training: num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size return int(num_worker_samples) def __iter__(self): if self.ds is None or self.reinit_each_iter: self._lazy_init() # Compute a rounded up sample count that is used to: # 1. make batches even cross workers & replicas in distributed validation. # This adds extra samples and will slightly alter validation results. # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size # batches are produced (underlying tfds iter wraps around) target_sample_count = self._num_samples_per_worker() # Iterate until exhausted or sample count hits target when training (ds.repeat enabled) sample_count = 0 for sample in self.ds: input_data = sample[self.input_key] if self.input_img_mode: if self.input_img_mode == 'L' and input_data.ndim == 3: input_data = input_data[:, :, 0] input_data = Image.fromarray(input_data, mode=self.input_img_mode) target_data = sample[self.target_key] if self.target_img_mode: # dense pixel target target_data = Image.fromarray(target_data, mode=self.target_img_mode) elif self.remap_class: target_data = self.class_to_idx[target_data] yield input_data, target_data sample_count += 1 if self.is_training and sample_count >= target_sample_count: # Need to break out of loop when repeat() is enabled for training w/ oversampling # this results in extra samples per epoch but seems more desirable than dropping # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes) break # Pad across distributed nodes (make counts equal by adding samples) if not self.is_training and self.dist_num_replicas > 1 and self.subsplit is not None and \ 0 < sample_count < target_sample_count: # Validation batch padding only done for distributed training where results are reduced across nodes. # For single process case, it won't matter if workers return different batch sizes. 
# If using input_context or % based splits, sample count can vary significantly across workers and this # approach should not be used (hence disabled if self.subsplit isn't set). while sample_count < target_sample_count: yield input_data, target_data # yield prev sample again sample_count += 1 def __len__(self): num_samples = self._num_samples_per_worker() * self.num_workers return num_samples def _filename(self, index, basename=False, absolute=False): assert False, "Not supported" # no random access to samples def filenames(self, basename=False, absolute=False): """ Return all filenames in dataset, overrides base""" if self.ds is None: self._lazy_init() names = [] for sample in self.ds: if len(names) > self.num_samples: break # safety for ds.repeat() case if 'file_name' in sample: name = sample['file_name'] elif 'filename' in sample: name = sample['filename'] elif 'id' in sample: name = sample['id'] else: assert False, "No supported name field present" names.append(name) return names
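A rough construction sketch based on the `__init__` signature above. The dataset name, data directory and the choice to instantiate the reader directly (rather than through timm's dataset factory) are all assumptions, and `tensorflow-datasets` must be installed:

```python
reader = ReaderTfds(
    name='cifar10',
    root='/data/tensorflow_datasets',
    split='train',
    is_training=True,
    batch_size=32,
    download=True,   # otherwise prepare the dataset beforehand with the tfds CLI
)
print(len(reader))   # batch_size-rounded sample count, see _num_samples_per_worker()

for img, label in reader:   # iteration triggers _lazy_init() and the tf.data pipeline
    print(img.size, label)  # img is a PIL.Image, label an int (or remapped via class_map)
    break
```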
pytorch-image-models/timm/data/readers/reader_tfds.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_tfds.py", "repo_id": "pytorch-image-models", "token_count": 7089 }
169
""" CBAM (sort-of) Attention Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on some tasks, especially fine-grained it seems. I may end up removing this impl. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn import torch.nn.functional as F from .conv_bn_act import ConvNormAct from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible class ChannelAttn(nn.Module): """ Original CBAM channel attention module, currently avg + max pool variant only. """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(ChannelAttn, self).__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) self.act = act_layer(inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) return x * self.gate(x_avg + x_max) class LightChannelAttn(ChannelAttn): """An experimental 'lightweight' that sums avg + max pool first """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightChannelAttn, self).__init__( channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) def forward(self, x): x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) x_attn = self.fc2(self.act(self.fc1(x_pool))) return x * F.sigmoid(x_attn) class SpatialAttn(nn.Module): """ Original CBAM spatial attention module """ def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(SpatialAttn, self).__init__() self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class LightSpatialAttn(nn.Module): """An experimental 'lightweight' variant that sums avg_pool and max_pool results. 
""" def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(LightSpatialAttn, self).__init__() self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class CbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(CbamModule, self).__init__() self.channel = ChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x class LightCbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightCbamModule, self).__init__() self.channel = LightChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = LightSpatialAttn(spatial_kernel_size) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x
pytorch-image-models/timm/layers/cbam.py/0
{ "file_path": "pytorch-image-models/timm/layers/cbam.py", "repo_id": "pytorch-image-models", "token_count": 2016 }
170
from enum import Enum from typing import Union import torch class Format(str, Enum): NCHW = 'NCHW' NHWC = 'NHWC' NCL = 'NCL' NLC = 'NLC' FormatT = Union[str, Format] def get_spatial_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NLC: dim = (1,) elif fmt is Format.NCL: dim = (2,) elif fmt is Format.NHWC: dim = (1, 2) else: dim = (2, 3) return dim def get_channel_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NHWC: dim = 3 elif fmt is Format.NLC: dim = 2 else: dim = 1 return dim def nchw_to(x: torch.Tensor, fmt: Format): if fmt == Format.NHWC: x = x.permute(0, 2, 3, 1) elif fmt == Format.NLC: x = x.flatten(2).transpose(1, 2) elif fmt == Format.NCL: x = x.flatten(2) return x def nhwc_to(x: torch.Tensor, fmt: Format): if fmt == Format.NCHW: x = x.permute(0, 3, 1, 2) elif fmt == Format.NLC: x = x.flatten(1, 2) elif fmt == Format.NCL: x = x.flatten(1, 2).transpose(1, 2) return x
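# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). These
# helpers let layers resolve spatial/channel dimensions from a Format value
# and permute tensors between layouts; the tensor shape below is an arbitrary
# example.
if __name__ == '__main__':
    x = torch.randn(2, 64, 7, 7)                              # NCHW
    nhwc = nchw_to(x, Format.NHWC)                            # -> (2, 7, 7, 64)
    nlc = nchw_to(x, Format.NLC)                              # -> (2, 49, 64)
    print(nhwc.shape, nlc.shape)
    print(get_spatial_dim('NHWC'), get_channel_dim('NHWC'))   # (1, 2) and 3
    assert nhwc_to(nhwc, Format.NCHW).shape == x.shape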
pytorch-image-models/timm/layers/format.py/0
{ "file_path": "pytorch-image-models/timm/layers/format.py", "repo_id": "pytorch-image-models", "token_count": 572 }
171
""" Normalization layers and wrappers Norm layer definitions that support fast norm and consistent channel arg order (always first arg). Hacked together by / Copyright 2022 Ross Wightman """ import numbers from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm class GroupNorm(nn.GroupNorm): def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN super().__init__(num_groups, num_channels, eps=eps, affine=affine) self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x): if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class GroupNorm1(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, *] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class LayerNorm(nn.LayerNorm): """ LayerNorm w/ fast norm option """ def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x class LayerNorm2d(nn.LayerNorm): """ LayerNorm for channels of '2D' spatial NCHW tensors """ def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) return x def _is_contiguous(tensor: torch.Tensor) -> bool: # jit is oh so lovely :/ if torch.jit.is_scripting(): return tensor.is_contiguous() else: return tensor.is_contiguous(memory_format=torch.contiguous_format) @torch.jit.script def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) x = (x - u) * torch.rsqrt(s + eps) x = x * weight[:, None, None] + bias[:, None, None] return x def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): u = x.mean(dim=1, keepdim=True) s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0) x = (x - u) * torch.rsqrt(s + eps) x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) return x class LayerNormExp2d(nn.LayerNorm): """ LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W). Experimental implementation w/ manual norm for tensors non-contiguous tensors. 
This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last layout.
    However, benefits are not always clear and can perform worse on other GPUs.
    """

    def __init__(self, num_channels, eps=1e-6):
        super().__init__(num_channels, eps=eps)

    def forward(self, x) -> torch.Tensor:
        if _is_contiguous(x):
            x = F.layer_norm(
                x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)
        else:
            x = _layer_norm_cf(x, self.weight, self.bias, self.eps)
        return x


class RmsNorm(nn.Module):
    """ RmsNorm w/ fast (apex) norm if available
    """
    __constants__ = ['normalized_shape', 'eps', 'elementwise_affine']
    normalized_shape: Tuple[int, ...]
    eps: float
    elementwise_affine: bool

    def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        normalized_shape = channels
        if isinstance(normalized_shape, numbers.Integral):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape,)  # type: ignore[assignment]
        self.normalized_shape = tuple(normalized_shape)  # type: ignore[arg-type]
        self.eps = eps
        self.elementwise_affine = affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
        else:
            self.register_parameter('weight', None)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        if self.elementwise_affine:
            nn.init.ones_(self.weight)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # NOTE fast norm fallback needs our rms norm impl, so both paths go through here.
        # Since there is no built-in PyTorch impl, always use APEX RmsNorm if it is installed.
        x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps)
        return x
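# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). All of
# the wrappers above take num_channels as the first argument so they can be
# swapped for BatchNorm-style layers in model configs; the shapes below are
# arbitrary examples.
if __name__ == '__main__':
    x = torch.randn(2, 32, 8, 8)                  # NCHW feature map
    print(GroupNorm(32, num_groups=8)(x).shape)   # channels-first arg order
    print(LayerNorm2d(32)(x).shape)               # LayerNorm over C for NCHW input
    tokens = torch.randn(2, 196, 384)             # (B, N, C) token sequence
    print(RmsNorm(384)(tokens).shape)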
pytorch-image-models/timm/layers/norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/norm.py", "repo_id": "pytorch-image-models", "token_count": 2520 }
172
""" Test Time Pooling (Average-Max Pool) Hacked together by / Copyright 2020 Ross Wightman """ import logging from torch import nn import torch.nn.functional as F from .adaptive_avgmax_pool import adaptive_avgmax_pool2d _logger = logging.getLogger(__name__) class TestTimePoolHead(nn.Module): def __init__(self, base, original_pool=7): super(TestTimePoolHead, self).__init__() self.base = base self.original_pool = original_pool base_fc = self.base.get_classifier() if isinstance(base_fc, nn.Conv2d): self.fc = base_fc else: self.fc = nn.Conv2d( self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) self.base.reset_classifier(0) # delete original fc layer def forward(self, x): x = self.base.forward_features(x) x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) x = self.fc(x) x = adaptive_avgmax_pool2d(x, 1) return x.view(x.size(0), -1) def apply_test_time_pool(model, config, use_test_size=False): test_time_pool = False if not hasattr(model, 'default_cfg') or not model.default_cfg: return model, False if use_test_size and 'test_input_size' in model.default_cfg: df_input_size = model.default_cfg['test_input_size'] else: df_input_size = model.default_cfg['input_size'] if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: _logger.info('Target input size %s > pretrained default %s, using test time pooling' % (str(config['input_size'][-2:]), str(df_input_size[-2:]))) model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) test_time_pool = True return model, test_time_pool
pytorch-image-models/timm/layers/test_time_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/test_time_pool.py", "repo_id": "pytorch-image-models", "token_count": 881 }
173
""" Model creation / weight loading / state_dict helpers Hacked together by / Copyright 2020 Ross Wightman """ import logging import os from collections import OrderedDict from typing import Any, Callable, Dict, Optional, Union import torch try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False _logger = logging.getLogger(__name__) __all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint'] def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]: # 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training cleaned_state_dict = {} for k, v in state_dict.items(): name = k[7:] if k.startswith('module.') else k cleaned_state_dict[name] = v return cleaned_state_dict def load_state_dict( checkpoint_path: str, use_ema: bool = True, device: Union[str, torch.device] = 'cpu', ) -> Dict[str, Any]: if checkpoint_path and os.path.isfile(checkpoint_path): # Check if safetensors or not and load weights accordingly if str(checkpoint_path).endswith(".safetensors"): assert _has_safetensors, "`pip install safetensors` to use .safetensors" checkpoint = safetensors.torch.load_file(checkpoint_path, device=device) else: checkpoint = torch.load(checkpoint_path, map_location=device) state_dict_key = '' if isinstance(checkpoint, dict): if use_ema and checkpoint.get('state_dict_ema', None) is not None: state_dict_key = 'state_dict_ema' elif use_ema and checkpoint.get('model_ema', None) is not None: state_dict_key = 'model_ema' elif 'state_dict' in checkpoint: state_dict_key = 'state_dict' elif 'model' in checkpoint: state_dict_key = 'model' state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint) _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) return state_dict else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError() def load_checkpoint( model: torch.nn.Module, checkpoint_path: str, use_ema: bool = True, device: Union[str, torch.device] = 'cpu', strict: bool = True, remap: bool = False, filter_fn: Optional[Callable] = None, ): if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): # numpy checkpoint, try to load via model specific load_pretrained fn if hasattr(model, 'load_pretrained'): model.load_pretrained(checkpoint_path) else: raise NotImplementedError('Model cannot load numpy checkpoint') return state_dict = load_state_dict(checkpoint_path, use_ema, device=device) if remap: state_dict = remap_state_dict(state_dict, model) elif filter_fn: state_dict = filter_fn(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def remap_state_dict( state_dict: Dict[str, Any], model: torch.nn.Module, allow_reshape: bool = True ): """ remap checkpoint by iterating over state dicts in order (ignoring original keys). This assumes models (and originating state dict) were created with params registered in same order. """ out_dict = {} for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()): assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' if va.shape != vb.shape: if allow_reshape: vb = vb.reshape(va.shape) else: assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' 
out_dict[ka] = vb return out_dict def resume_checkpoint( model: torch.nn.Module, checkpoint_path: str, optimizer: torch.optim.Optimizer = None, loss_scaler: Any = None, log_info: bool = True, ): resume_epoch = None if os.path.isfile(checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu') if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: if log_info: _logger.info('Restoring model state from checkpoint...') state_dict = clean_state_dict(checkpoint['state_dict']) model.load_state_dict(state_dict) if optimizer is not None and 'optimizer' in checkpoint: if log_info: _logger.info('Restoring optimizer state from checkpoint...') optimizer.load_state_dict(checkpoint['optimizer']) if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: if log_info: _logger.info('Restoring AMP loss scaler state from checkpoint...') loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) if 'epoch' in checkpoint: resume_epoch = checkpoint['epoch'] if 'version' in checkpoint and checkpoint['version'] > 1: resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save if log_info: _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) else: model.load_state_dict(checkpoint) if log_info: _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) return resume_epoch else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError()
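# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original helpers). A
# checkpoint here is simply a dict with a 'state_dict' (optionally
# 'state_dict_ema'/'model_ema') entry; clean_state_dict strips the 'module.'
# prefixes left by DataParallel/DDP wrappers before loading. The paths and
# toy model below are assumptions for illustration only.
if __name__ == '__main__':
    import tempfile

    model = torch.nn.Linear(8, 2)
    with tempfile.TemporaryDirectory() as tmp_dir:
        ckpt_path = os.path.join(tmp_dir, 'checkpoint.pth.tar')
        # simulate a checkpoint written from a DataParallel-wrapped model
        wrapped_sd = {'module.' + k: v for k, v in model.state_dict().items()}
        torch.save({'state_dict': wrapped_sd, 'epoch': 5}, ckpt_path)
        incompatible = load_checkpoint(model, ckpt_path, use_ema=False, strict=True)
        print('missing:', incompatible.missing_keys, 'unexpected:', incompatible.unexpected_keys)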
pytorch-image-models/timm/models/_helpers.py/0
{ "file_path": "pytorch-image-models/timm/models/_helpers.py", "repo_id": "pytorch-image-models", "token_count": 2546 }
174
""" ConViT Model @article{d2021convit, title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, journal={arXiv preprint arXiv:2103.10697}, year={2021} } Paper link: https://arxiv.org/abs/2103.10697 Original code: https://github.com/facebookresearch/convit, original copyright below Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the CC-by-NC license found in the # LICENSE file in the root directory of this source tree. # '''These modules are adapted from those of timm, see https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ''' from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs from .vision_transformer_hybrid import HybridEmbed __all__ = ['ConVit'] @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class GPSA(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., locality_strength=1., ): super().__init__() self.num_heads = num_heads self.dim = dim head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.locality_strength = locality_strength self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.pos_proj = nn.Linear(3, num_heads) self.proj_drop = nn.Dropout(proj_drop) self.gating_param = nn.Parameter(torch.ones(self.num_heads)) self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None def forward(self, x): B, N, C = x.shape if self.rel_indices is None or self.rel_indices.shape[1] != N: self.rel_indices = self.get_rel_indices(N) attn = self.get_attention(x) v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def get_attention(self, x): B, N, C = x.shape qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k = qk[0], qk[1] pos_score = self.rel_indices.expand(B, -1, -1, -1) pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) patch_score = (q @ k.transpose(-2, -1)) * self.scale patch_score = patch_score.softmax(dim=-1) pos_score = pos_score.softmax(dim=-1) gating = self.gating_param.view(1, -1, 1, 1) attn = (1. 
- torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score attn /= attn.sum(dim=-1).unsqueeze(-1) attn = self.attn_drop(attn) return attn def get_attention_map(self, x, return_map=False): attn_map = self.get_attention(x).mean(0) # average over batch distances = self.rel_indices.squeeze()[:, :, -1] ** .5 dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) if return_map: return dist, attn_map else: return dist def local_init(self): self.v.weight.data.copy_(torch.eye(self.dim)) locality_distance = 1 # max(1,1/locality_strength**.5) kernel_size = int(self.num_heads ** .5) center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 for h1 in range(kernel_size): for h2 in range(kernel_size): position = h1 + kernel_size * h2 self.pos_proj.weight.data[position, 2] = -1 self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance self.pos_proj.weight.data *= self.locality_strength def get_rel_indices(self, num_patches: int) -> torch.Tensor: img_size = int(num_patches ** .5) rel_indices = torch.zeros(1, num_patches, num_patches, 3) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 rel_indices[:, :, :, 2] = indd.unsqueeze(0) rel_indices[:, :, :, 1] = indy.unsqueeze(0) rel_indices[:, :, :, 0] = indx.unsqueeze(0) device = self.qk.weight.device return rel_indices.to(device) class MHSA(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def get_attention_map(self, x, return_map=False): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn_map = (q @ k.transpose(-2, -1)) * self.scale attn_map = attn_map.softmax(dim=-1).mean(0) img_size = int(N ** .5) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 distances = indd ** .5 distances = distances.to(x.device) dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N if return_map: return dist, attn_map else: return dist def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, use_gpsa=True, locality_strength=1., ): super().__init__() self.norm1 = norm_layer(dim) self.use_gpsa = use_gpsa if self.use_gpsa: self.attn = GPSA( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, locality_strength=locality_strength, ) else: self.attn = MHSA( dim, num_heads=num_heads, 
qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConVit(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=LayerNorm, local_up_to_layer=3, locality_strength=1., use_pos_embed=True, ): super().__init__() assert global_pool in ('', 'avg', 'token') embed_dim *= num_heads self.num_classes = num_classes self.global_pool = global_pool self.local_up_to_layer = local_up_to_layer self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.locality_strength = locality_strength self.use_pos_embed = use_pos_embed if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.num_patches = num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) if self.use_pos_embed: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) trunc_normal_(self.pos_embed, std=.02) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, use_gpsa=i < local_up_to_layer, locality_strength=locality_strength, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) # Classifier head self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) for n, m in self.named_modules(): if hasattr(m, 'local_init'): m.local_init() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 'avg') self.global_pool = global_pool self.head 
= nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.use_pos_embed: x = x + self.pos_embed x = self.pos_drop(x) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for u, blk in enumerate(self.blocks): if u == self.local_up_to_layer: x = torch.cat((cls_tokens, x), dim=1) x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') return build_model_with_cfg(ConVit, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # ConViT 'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_base.fb_in1k': _cfg(hf_hub_id='timm/') }) @register_model def convit_tiny(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4) model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_small(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9) model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_base(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16) model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model
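# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original model code).
# The first `local_up_to_layer` blocks use GPSA, which gates between content
# attention and a convolution-like positional attention; the remaining blocks
# use plain MHSA with the class token prepended. Shapes noted below assume the
# tiny config at 224x224 input.
if __name__ == '__main__':
    model = convit_tiny(pretrained=False)
    x = torch.randn(1, 3, 224, 224)
    print(model(x).shape)                    # torch.Size([1, 1000]) classification logits
    feats = model.forward_features(x)        # 196 patch tokens + 1 cls token, embed_dim 192
    print(feats.shape)                       # torch.Size([1, 197, 192])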
pytorch-image-models/timm/models/convit.py/0
{ "file_path": "pytorch-image-models/timm/models/convit.py", "repo_id": "pytorch-image-models", "token_count": 7716 }
175
""" EVA EVA from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636 @article{EVA, title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale}, author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2211.07636}, year={2022} } EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331 @article{EVA02, title={EVA-02: A Visual Representation for Neon Genesis}, author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2303.11331}, year={2023} } This file contains EVA & EVA02 model implementations evolved from BEiT, additional models in vision_transformer.py. Modifications by / Copyright 2023 Ross Wightman, original copyrights below """ # EVA models Copyright (c) 2022 BAAI-Vision # EVA02 models Copyright (c) 2023 BAAI-Vision import math from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, \ apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, \ to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model __all__ = ['Eva'] class EvaAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, num_heads: int = 8, qkv_bias: bool = True, qkv_fused: bool = True, attn_drop: float = 0., proj_drop: float = 0., attn_head_dim: Optional[int] = None, norm_layer: Optional[Callable] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: attn_drop: proj_drop: attn_head_dim: norm_layer: """ super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() if qkv_fused: self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) self.q_proj = self.k_proj = self.v_proj = None if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = self.k_bias = self.v_bias = None else: self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.k_proj = nn.Linear(dim, all_head_dim, bias=False) self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.qkv = None self.q_bias = self.k_bias = self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity() self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward( self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ): B, N, C = x.shape if self.qkv is not None: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim else: q = self.q_proj(x).reshape(B, N, 
self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) if rope is not None: q = torch.cat([q[:, :, :1, :], apply_rot_embed_cat(q[:, :, 1:, :], rope)], 2).type_as(v) k = torch.cat([k[:, :, :1, :], apply_rot_embed_cat(k[:, :, 1:, :], rope)], 2).type_as(v) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = (q @ k.transpose(-2, -1)) attn = attn.softmax(dim=-1) if attn_mask is not None: attn_mask = attn_mask.to(torch.bool) attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.norm(x) x = self.proj(x) x = self.proj_drop(x) return x class EvaBlock(nn.Module): def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, act_layer: Callable = nn.GELU, norm_layer: Callable = LayerNorm, attn_head_dim: Optional[int] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: proj_drop: attn_drop: drop_path: init_values: act_layer: norm_layer: attn_head_dim: """ super().__init__() self.norm1 = norm_layer(dim) self.attn = EvaAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None, ) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): if self.gamma_1 is None: x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.mlp(self.norm2(x))) else: x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) return x class EvaBlockPostNorm(nn.Module): """ EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE. 
""" def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, # ignore for post-norm act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, attn_head_dim: Optional[int] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: proj_drop: attn_drop: drop_path: init_values: act_layer: norm_layer: attn_head_dim: """ super().__init__() self.attn = EvaAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class Eva(nn.Module): """ Eva Vision Transformer w/ Abs & Rotary Pos Embed This class implements the EVA and EVA02 models that were based on the BEiT ViT variant * EVA - abs pos embed, global avg pool * EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer) """ def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 768, depth: int = 12, num_heads: int = 12, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., norm_layer: Callable = LayerNorm, init_values: Optional[float] = None, class_token: bool = True, use_abs_pos_emb: bool = True, use_rot_pos_emb: bool = False, use_post_norm: bool = False, dynamic_img_size: bool = False, dynamic_img_pad: bool = False, ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None, head_init_scale: float = 0.001, ): """ Args: img_size: patch_size: in_chans: num_classes: global_pool: embed_dim: depth: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: drop_rate: pos_drop_rate: proj_drop_rate: attn_drop_rate: drop_path_rate: norm_layer: init_values: class_token: use_abs_pos_emb: use_rot_pos_emb: use_post_norm: 
ref_feat_shape: head_init_scale: """ super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_prefix_tokens = 1 if class_token else 0 self.dynamic_img_size = dynamic_img_size self.grad_checkpointing = False embed_args = {} if dynamic_img_size: # flatten deferred until after pos embed embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, dynamic_img_pad=dynamic_img_pad, **embed_args, ) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None self.pos_embed = nn.Parameter( torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens, return_indices=True, ) else: self.patch_drop = None if use_rot_pos_emb: ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None self.rope = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=None if dynamic_img_size else self.patch_embed.grid_size, ref_feat_shape=ref_feat_shape, ) else: self.rope = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock self.blocks = nn.ModuleList([ block_fn( dim=embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, mlp_ratio=mlp_ratio, swiglu_mlp=swiglu_mlp, scale_mlp=scale_mlp, scale_attn_inner=scale_attn_inner, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, ) for i in range(depth)]) use_fc_norm = self.global_pool == 'avg' self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) if self.cls_token is not None: trunc_normal_(self.cls_token, std=.02) self.fix_init_weight() if isinstance(self.head, nn.Linear): trunc_normal_(self.head.weight, std=.02) self.head.weight.data.mul_(head_init_scale) self.head.bias.data.mul_(head_init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def no_weight_decay(self): nwd = {'pos_embed', 'cls_token'} return nwd @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], ) return matcher @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = 
global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if self.dynamic_img_size: B, H, W, C = x.shape if self.pos_embed is not None: pos_embed = resample_abs_pos_embed( self.pos_embed, (H, W), num_prefix_tokens=self.num_prefix_tokens, ) else: pos_embed = None x = x.view(B, -1, C) rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None else: pos_embed = self.pos_embed rot_pos_embed = self.rope.get_embed() if self.rope is not None else None if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) if pos_embed is not None: x = x + pos_embed x = self.pos_drop(x) # obtain shared rotary position embedding and apply patch dropout if self.patch_drop is not None: x, keep_indices = self.patch_drop(x) if rot_pos_embed is not None and keep_indices is not None: rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices) return x, rot_pos_embed def forward_features(self, x): x = self.patch_embed(x) x, rot_pos_embed = self._pos_embed(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed) else: x = blk(x, rope=rot_pos_embed) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn( state_dict, model, interpolation='bicubic', antialias=True, ): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} state_dict = state_dict.get('model_ema', state_dict) state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('module', state_dict) state_dict = state_dict.get('state_dict', state_dict) # prefix for loading OpenCLIP compatible weights if 'visual.trunk.pos_embed' in state_dict: prefix = 'visual.trunk.' elif 'visual.pos_embed' in state_dict: prefix = 'visual.' 
else: prefix = '' mim_weights = prefix + 'mask_token' in state_dict no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict len_prefix = len(prefix) for k, v in state_dict.items(): if prefix: if k.startswith(prefix): k = k[len_prefix:] else: continue if 'rope' in k: # fixed embedding no need to load buffer from checkpoint continue if 'patch_embed.proj.weight' in k: _, _, H, W = model.patch_embed.proj.weight.shape if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed( v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True, ) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed( v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) k = k.replace('mlp.ffn_ln', 'mlp.norm') k = k.replace('attn.inner_attn_ln', 'attn.norm') k = k.replace('mlp.w12', 'mlp.fc1') k = k.replace('mlp.w1', 'mlp.fc1_g') k = k.replace('mlp.w2', 'mlp.fc1_x') k = k.replace('mlp.w3', 'mlp.fc2') if no_qkv: k = k.replace('q_bias', 'q_proj.bias') k = k.replace('v_bias', 'v_proj.bias') if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'): if k == 'norm.weight' or k == 'norm.bias': # try moving norm -> fc norm on fine-tune, probably a better starting point than new init k = k.replace('norm', 'fc_norm') else: # skip pretrain mask token & head weights continue out_dict[k] = v return out_dict def _create_eva(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Eva models.') model = build_model_with_cfg( Eva, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ # EVA 01 CLIP fine-tuned on imagenet-1k 'eva_giant_patch14_224.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt', hf_hub_id='timm/', ), 'eva_giant_patch14_336.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), # MIM EVA 01 pretrain, ft on in22k -> in1k 'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'), # in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt', 
hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg( hf_hub_id='timm/', #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), # in22k or m3m MIM pretrain w/ in1k fine-tune 'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), # in22k or m3m MIM pretrain w/ in22k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), # in22k or m38m MIM pretrain 'eva02_tiny_patch14_224.mim_in22k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_small_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_base_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_m38m': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt', hf_hub_id='timm/', num_classes=0, ), # EVA01 and EVA02 CLIP image towers 'eva_giant_patch14_clip_224.laion400m': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', 
hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva_giant_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva02_base_patch16_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=512, ), 'eva02_large_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=768, ), 'eva02_large_patch14_clip_336.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 336, 336), crop_pct=1.0, num_classes=768, ), 'eva02_enormous_patch14_clip_224.laion2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.pretrain': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_E_psz14.pt', num_classes=0, ), }) @register_model def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=384, 
depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=336, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=336, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=448, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=448, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ EVA-g CLIP model (only difference from non-CLIP is the pooling) """ model_args = dict( patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408, global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch16_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_base """ model_args = dict( img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_base_patch16_clip_224', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ model_args = dict( img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ model_args = dict( img_size=336, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that uses residual post-norm in blocks """ model_args = dict( img_size=224, patch_size=14, embed_dim=1792, depth=64, num_heads=16, mlp_ratio=15360 / 1792, use_post_norm=True, global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/eva.py/0
{ "file_path": "pytorch-image-models/timm/models/eva.py", "repo_id": "pytorch-image-models", "token_count": 21637 }
176
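As a quick sanity check on how the EVA-02 entrypoints registered above are meant to be consumed, here is a minimal sketch using timm's `create_model` factory. `pretrained=False` keeps the example offline; the variant name and input size simply follow the tiny 224-pixel configuration defined in the file.

```python
import torch
import timm

# Build one of the EVA-02 entrypoints registered above; pretrained=False keeps
# the example offline (no Hugging Face Hub download).
model = timm.create_model('eva02_tiny_patch14_224', pretrained=False).eval()

# A 224px input with patch size 14 gives the 16x16 token grid used as ref_feat_shape.
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000]) with the default classifier head
```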
# NOTE timm.models.layers is DEPRECATED, please use timm.layers, this is here to reduce breakages in transition from timm.layers.activations import * from timm.layers.adaptive_avgmax_pool import \ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from timm.layers.blur_pool import BlurPool2d from timm.layers.classifier import ClassifierHead, create_classifier from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ set_layer_config from timm.layers.conv2d_same import Conv2dSame, conv2d_same from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn from timm.layers.create_attn import get_attn, create_attn from timm.layers.create_conv2d import create_conv2d from timm.layers.create_norm import get_norm_layer, create_norm_layer from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from timm.layers.gather_excite import GatherExcite from timm.layers.global_context import GlobalContext from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from timm.layers.inplace_abn import InplaceAbn from timm.layers.linear import Linear from timm.layers.mixed_conv2d import MixedConv2d from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm from timm.layers.padding import get_padding, get_same_padding, pad_same from timm.layers.patch_embed import PatchEmbed from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from timm.layers.selective_kernel import SelectiveKernel from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct from timm.layers.space_to_depth import SpaceToDepthModule from timm.layers.split_attn import SplitAttn from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool from timm.layers.trace_utils import _assert, _float_to_int from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_ import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", DeprecationWarning)
pytorch-image-models/timm/models/layers/__init__.py/0
{ "file_path": "pytorch-image-models/timm/models/layers/__init__.py", "repo_id": "pytorch-image-models", "token_count": 1240 }
177
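The shim above exists only to keep old import paths working during the transition to `timm.layers`. A short sketch of what that means in practice; the symbols chosen here (`DropPath`, `trunc_normal_`) are just illustrative examples of the re-exported names.

```python
# Preferred going forward: import building blocks from timm.layers directly.
from timm.layers import DropPath, trunc_normal_

# The legacy path still resolves during the transition, but importing the module
# emits the DeprecationWarning defined above the first time it is loaded.
from timm.models.layers import DropPath as LegacyDropPath

print(DropPath is LegacyDropPath)  # True: the shim simply re-exports timm.layers
```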
""" An implementation of RepGhostNet Model as defined in: RepGhost: A Hardware-Efficient Ghost Module via Re-parameterization. https://arxiv.org/abs/2211.06088 Original implementation: https://github.com/ChengpengChen/RepGhost """ import copy from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, Linear, make_divisible from ._builder import build_model_with_cfg from ._efficientnet_blocks import SqueezeExcite, ConvBnAct from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['RepGhostNet'] _SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) class RepGhostModule(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=1, dw_size=3, stride=1, relu=True, reparam=True, ): super(RepGhostModule, self).__init__() self.out_chs = out_chs init_chs = out_chs new_chs = out_chs self.primary_conv = nn.Sequential( nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), nn.ReLU(inplace=True) if relu else nn.Identity(), ) fusion_conv = [] fusion_bn = [] if reparam: fusion_conv.append(nn.Identity()) fusion_bn.append(nn.BatchNorm2d(init_chs)) self.fusion_conv = nn.Sequential(*fusion_conv) self.fusion_bn = nn.Sequential(*fusion_bn) self.cheap_operation = nn.Sequential( nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False), nn.BatchNorm2d(new_chs), # nn.ReLU(inplace=True) if relu else nn.Identity(), ) self.relu = nn.ReLU(inplace=False) if relu else nn.Identity() def forward(self, x): x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) for conv, bn in zip(self.fusion_conv, self.fusion_bn): x2 = x2 + bn(conv(x1)) return self.relu(x2) def get_equivalent_kernel_bias(self): kernel3x3, bias3x3 = self._fuse_bn_tensor(self.cheap_operation[0], self.cheap_operation[1]) for conv, bn in zip(self.fusion_conv, self.fusion_bn): kernel, bias = self._fuse_bn_tensor(conv, bn, kernel3x3.shape[0], kernel3x3.device) kernel3x3 += self._pad_1x1_to_3x3_tensor(kernel) bias3x3 += bias return kernel3x3, bias3x3 @staticmethod def _pad_1x1_to_3x3_tensor(kernel1x1): if kernel1x1 is None: return 0 else: return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1]) @staticmethod def _fuse_bn_tensor(conv, bn, in_channels=None, device=None): in_channels = in_channels if in_channels else bn.running_mean.shape[0] device = device if device else bn.weight.device if isinstance(conv, nn.Conv2d): kernel = conv.weight assert conv.bias is None else: assert isinstance(conv, nn.Identity) kernel = torch.ones(in_channels, 1, 1, 1, device=device) if isinstance(bn, nn.BatchNorm2d): running_mean = bn.running_mean running_var = bn.running_var gamma = bn.weight beta = bn.bias eps = bn.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std assert isinstance(bn, nn.Identity) return kernel, torch.zeros(in_channels).to(kernel.device) def switch_to_deploy(self): if len(self.fusion_conv) == 0 and len(self.fusion_bn) == 0: return kernel, bias = self.get_equivalent_kernel_bias() self.cheap_operation = nn.Conv2d( in_channels=self.cheap_operation[0].in_channels, out_channels=self.cheap_operation[0].out_channels, kernel_size=self.cheap_operation[0].kernel_size, padding=self.cheap_operation[0].padding, dilation=self.cheap_operation[0].dilation, 
groups=self.cheap_operation[0].groups, bias=True) self.cheap_operation.weight.data = kernel self.cheap_operation.bias.data = bias self.__delattr__('fusion_conv') self.__delattr__('fusion_bn') self.fusion_conv = [] self.fusion_bn = [] def reparameterize(self): self.switch_to_deploy() class RepGhostBottleneck(nn.Module): """ RepGhost bottleneck w/ optional SE""" def __init__( self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, act_layer=nn.ReLU, se_ratio=0., reparam=True, ): super(RepGhostBottleneck, self).__init__() has_se = se_ratio is not None and se_ratio > 0. self.stride = stride # Point-wise expansion self.ghost1 = RepGhostModule(in_chs, mid_chs, relu=True, reparam=reparam) # Depth-wise convolution if self.stride > 1: self.conv_dw = nn.Conv2d( mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) self.bn_dw = nn.BatchNorm2d(mid_chs) else: self.conv_dw = None self.bn_dw = None # Squeeze-and-excitation self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None # Point-wise linear projection self.ghost2 = RepGhostModule(mid_chs, out_chs, relu=False, reparam=reparam) # shortcut if in_chs == out_chs and self.stride == 1: self.shortcut = nn.Sequential() else: self.shortcut = nn.Sequential( nn.Conv2d( in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), nn.BatchNorm2d(in_chs), nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), nn.BatchNorm2d(out_chs), ) def forward(self, x): shortcut = x # 1st ghost bottleneck x = self.ghost1(x) # Depth-wise convolution if self.conv_dw is not None: x = self.conv_dw(x) x = self.bn_dw(x) # Squeeze-and-excitation if self.se is not None: x = self.se(x) # 2nd ghost bottleneck x = self.ghost2(x) x += self.shortcut(shortcut) return x class RepGhostNet(nn.Module): def __init__( self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2, reparam=True, ): super(RepGhostNet, self).__init__() # setting of inverted residual blocks assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' self.cfgs = cfgs self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] # building first layer stem_chs = make_divisible(16 * width, 4) self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) self.bn1 = nn.BatchNorm2d(stem_chs) self.act1 = nn.ReLU(inplace=True) prev_chs = stem_chs # building inverted residual blocks stages = nn.ModuleList([]) block = RepGhostBottleneck stage_idx = 0 net_stride = 2 for cfg in self.cfgs: layers = [] s = 1 for k, exp_size, c, se_ratio, s in cfg: out_chs = make_divisible(c * width, 4) mid_chs = make_divisible(exp_size * width, 4) layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, reparam=reparam)) prev_chs = out_chs if s > 1: net_stride *= 2 self.feature_info.append(dict( num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) stages.append(nn.Sequential(*layers)) stage_idx += 1 out_chs = make_divisible(exp_size * width * 2, 4) stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) self.pool_dim = prev_chs = out_chs self.blocks = nn.Sequential(*stages) # building last several layers self.num_features = out_chs = 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) self.act2 = 
nn.ReLU(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes # cannot meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.classifier(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def convert_to_deploy(self): repghost_model_convert(self, do_copy=False) def repghost_model_convert(model: torch.nn.Module, save_path=None, do_copy=True): """ taken from from https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py """ if do_copy: model = copy.deepcopy(model) for module in model.modules(): if hasattr(module, 'switch_to_deploy'): module.switch_to_deploy() if save_path is not None: torch.save(model.state_dict(), save_path) return model def _create_repghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a RepGhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 8, 16, 0, 1]], # stage2 [[3, 24, 24, 0, 2]], [[3, 36, 24, 0, 1]], # stage3 [[5, 36, 40, 0.25, 2]], [[5, 60, 40, 0.25, 1]], # stage4 [[3, 120, 80, 0, 2]], [[3, 100, 80, 0, 1], [3, 120, 80, 0, 1], [3, 120, 80, 0, 1], [3, 240, 112, 0.25, 1], [3, 336, 112, 0.25, 1] ], # stage5 [[5, 336, 160, 0.25, 2]], [[5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1], [5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return build_model_with_cfg( RepGhostNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), **model_kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'repghostnet_050.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_5x_43M_66.95.pth.tar' ), 'repghostnet_058.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_58x_60M_68.94.pth.tar' ), 'repghostnet_080.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_8x_96M_72.24.pth.tar' ), 
'repghostnet_100.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_0x_142M_74.22.pth.tar' ), 'repghostnet_111.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_11x_170M_75.07.pth.tar' ), 'repghostnet_130.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_3x_231M_76.37.pth.tar' ), 'repghostnet_150.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_5x_301M_77.45.pth.tar' ), 'repghostnet_200.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_2_0x_516M_78.81.pth.tar' ), }) @register_model def repghostnet_050(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-0.5x """ model = _create_repghostnet('repghostnet_050', width=0.5, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_058(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-0.58x """ model = _create_repghostnet('repghostnet_058', width=0.58, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_080(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-0.8x """ model = _create_repghostnet('repghostnet_080', width=0.8, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_100(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.0x """ model = _create_repghostnet('repghostnet_100', width=1.0, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_111(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.11x """ model = _create_repghostnet('repghostnet_111', width=1.11, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_130(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.3x """ model = _create_repghostnet('repghostnet_130', width=1.3, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_150(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.5x """ model = _create_repghostnet('repghostnet_150', width=1.5, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_200(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-2.0x """ model = _create_repghostnet('repghostnet_200', width=2.0, pretrained=pretrained, **kwargs) return model
pytorch-image-models/timm/models/repghost.py/0
{ "file_path": "pytorch-image-models/timm/models/repghost.py", "repo_id": "pytorch-image-models", "token_count": 8148 }
178
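`RepGhostModule.switch_to_deploy` folds the identity-plus-BN fusion branches into the depthwise `cheap_operation` convolution, so inference-time reparameterization should leave outputs numerically unchanged. A minimal sketch of exercising that path through `repghost_model_convert`, assuming an eval-mode model so BatchNorm uses its running statistics:

```python
import torch
from timm.models.repghost import repghostnet_100, repghost_model_convert

model = repghostnet_100(pretrained=False).eval()
x = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    before = model(x)
    # do_copy=True leaves the original model untouched and returns a
    # reparameterized copy whose fusion branches are folded into cheap_operation.
    deployed = repghost_model_convert(model, do_copy=True)
    after = deployed(x)

print((before - after).abs().max())            # only a tiny numerical difference
print(torch.allclose(before, after, atol=1e-4))
```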
""" TResNet: High Performance GPU-Dedicated Architecture https://arxiv.org/pdf/2003.13630.pdf Original model: https://github.com/mrT23/TResNet """ from collections import OrderedDict from functools import partial import torch import torch.nn as nn from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule,\ ConvNormActAa, ConvNormAct, DropPath from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['TResNet'] # model_registry will add each entrypoint fn to this class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None, drop_path_rate=0. ): super(BasicBlock, self).__init__() self.downsample = downsample self.stride = stride act_layer = partial(nn.LeakyReLU, negative_slope=1e-3) if stride == 1: self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=1, act_layer=act_layer) else: self.conv1 = ConvNormActAa( inplanes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer) self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False, act_layer=None) self.act = nn.ReLU(inplace=True) rd_chs = max(planes * self.expansion // 4, 64) self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, act_layer=None, aa_layer=None, drop_path_rate=0., ): super(Bottleneck, self).__init__() self.downsample = downsample self.stride = stride act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3) self.conv1 = ConvNormAct( inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer) if stride == 1: self.conv2 = ConvNormAct( planes, planes, kernel_size=3, stride=1, act_layer=act_layer) else: self.conv2 = ConvNormActAa( planes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer) reduction_chs = max(planes * self.expansion // 8, 64) self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None self.conv3 = ConvNormAct( planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act = nn.ReLU(inplace=True) def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.conv3(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class TResNet(nn.Module): def __init__( self, layers, in_chans=3, num_classes=1000, width_factor=1.0, v2=False, global_pool='fast', drop_rate=0., drop_path_rate=0., ): self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False super(TResNet, self).__init__() aa_layer = BlurPool2d act_layer = nn.LeakyReLU # TResnet stages self.inplanes = int(64 * width_factor) self.planes = int(64 * width_factor) if v2: self.inplanes = self.inplanes // 8 * 8 self.planes = self.planes // 8 * 8 dpr = [x.tolist() for x in 
torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer) layer1 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0]) layer2 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1]) layer3 = self._make_layer( Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2]) layer4 = self._make_layer( Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3]) # body self.body = nn.Sequential(OrderedDict([ ('s2d', SpaceToDepth()), ('conv1', conv1), ('layer1', layer1), ('layer2', layer2), ('layer3', layer3), ('layer4', layer4), ])) self.feature_info = [ dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), ] # head self.num_features = (self.planes * 8) * Bottleneck.expansion self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) # model initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) # residual connections special initialization for m in self.modules(): if isinstance(m, BasicBlock): nn.init.zeros_(m.conv2.bn.weight) if isinstance(m, Bottleneck): nn.init.zeros_(m.conv3.bn.weight) def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: layers = [] if stride == 2: # avg pooling before 1x1 conv layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) layers += [ConvNormAct( self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None)] downsample = nn.Sequential(*layers) layers = [] for i in range(blocks): layers.append(block( self.inplanes, planes, stride=stride if i == 0 else 1, downsample=downsample if i == 0 else None, use_se=use_se, aa_layer=aa_layer, drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate, )) self.inplanes = planes * block.expansion return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = self.body.s2d(x) x = self.body.conv1(x) x = checkpoint_seq([ self.body.layer1, self.body.layer2, self.body.layer3, self.body.layer4], x, flatten=True) else: x = self.body(x) return x def 
forward_head(self, x, pre_logits: bool = False): return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'body.conv1.conv.weight' in state_dict: return state_dict import re state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) out_dict = {} for k, v in state_dict.items(): k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k) k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k) if k.endswith('bn.weight'): # convert weight from inplace_abn to batchnorm v = v.abs().add(1e-5) out_dict[k] = v return out_dict def _create_tresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( TResNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), 'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_l.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_xl.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), }) @register_model def tresnet_m(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[3, 4, 11, 3]) return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_l(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2) return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_xl(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3) return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True) return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k', 'tresnet_m_448': 'tresnet_m.miil_in1k_448', 'tresnet_l_448': 'tresnet_l.miil_in1k_448', 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448', })
pytorch-image-models/timm/models/tresnet.py/0
{ "file_path": "pytorch-image-models/timm/models/tresnet.py", "repo_id": "pytorch-image-models", "token_count": 6338 }
179
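Because the model records `feature_info` and `_create_tresnet` passes a `feature_cfg`, TResNet can also be built as a multi-scale feature backbone through timm's generic `features_only` path. A brief sketch; the variant and input size are arbitrary choices for illustration.

```python
import torch
import timm

# features_only uses the feature_cfg / feature_info wiring above to expose the
# intermediate stages instead of the classification head.
backbone = timm.create_model('tresnet_m', pretrained=False, features_only=True)

x = torch.randn(1, 3, 224, 224)
feats = backbone(x)

print(backbone.feature_info.channels())  # channel count of each returned stage
for f in feats:
    print(tuple(f.shape))
```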
""" AdaHessian Optimizer Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py Originally licensed MIT, Copyright 2020, David Samuel """ import torch class Adahessian(torch.optim.Optimizer): """ Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 0.1) betas ((float, float), optional): coefficients used for computing running averages of gradient and the squared hessian trace (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) hessian_power (float, optional): exponent of the hessian trace (default: 1.0) update_each (int, optional): compute the hessian trace approximation only after *this* number of steps (to save time) (default: 1) n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) """ def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr}") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps}") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") if not 0.0 <= hessian_power <= 1.0: raise ValueError(f"Invalid Hessian power value: {hessian_power}") self.n_samples = n_samples self.update_each = update_each self.avg_conv_kernel = avg_conv_kernel # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training self.seed = 2147483647 self.generator = torch.Generator().manual_seed(self.seed) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) super(Adahessian, self).__init__(params, defaults) for p in self.get_params(): p.hess = 0.0 self.state[p]["hessian step"] = 0 @property def is_second_order(self): return True def get_params(self): """ Gets all parameters in all param_groups with gradients """ return (p for group in self.param_groups for p in group['params'] if p.requires_grad) def zero_hessian(self): """ Zeros out the accumalated hessian traces. """ for p in self.get_params(): if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: p.hess.zero_() @torch.no_grad() def set_hessian(self): """ Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. 
""" params = [] for p in filter(lambda p: p.grad is not None, self.get_params()): if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step params.append(p) self.state[p]["hessian step"] += 1 if len(params) == 0: return if self.generator.device != params[0].device: # hackish way of casting the generator to the right device self.generator = torch.Generator(params[0].device).manual_seed(self.seed) grads = [p.grad for p in params] for i in range(self.n_samples): # Rademacher distribution {-1.0, 1.0} zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] h_zs = torch.autograd.grad( grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) for h_z, z, p in zip(h_zs, zs, params): p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) @torch.no_grad() def step(self, closure=None): """ Performs a single optimization step. Arguments: closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) """ loss = None if closure is not None: loss = closure() self.zero_hessian() self.set_hessian() for group in self.param_groups: for p in group['params']: if p.grad is None or p.hess is None: continue if self.avg_conv_kernel and p.dim() == 4: p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() # Perform correct stepweight decay as in AdamW p.mul_(1 - group['lr'] * group['weight_decay']) state = self.state[p] # State initialization if len(state) == 1: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of Hessian diagonal square values state['exp_hessian_diag_sq'] = torch.zeros_like(p) exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] k = group['hessian_power'] denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) # make update step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss
pytorch-image-models/timm/optim/adahessian.py/0
{ "file_path": "pytorch-image-models/timm/optim/adahessian.py", "repo_id": "pytorch-image-models", "token_count": 2955 }
180
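Adahessian reports `is_second_order = True`, and `set_hessian` differentiates through the existing gradients, so the training loop must call `backward(create_graph=True)`. A minimal toy loop illustrating that requirement; the model, data, and hyperparameters are placeholders.

```python
import torch
import torch.nn as nn
from timm.optim.adahessian import Adahessian

model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 2))
optimizer = Adahessian(model.parameters(), lr=0.1)

criterion = nn.CrossEntropyLoss()
x, y = torch.randn(64, 10), torch.randint(0, 2, (64,))

for _ in range(3):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    # set_hessian() calls torch.autograd.grad on the existing gradients, so the
    # graph from this backward pass must be kept alive.
    loss.backward(create_graph=True)
    optimizer.step()
    print(loss.item())
```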
from functools import update_wrapper, wraps import torch from torch import Tensor from torch.optim.optimizer import Optimizer try: from torch.optim.optimizer import _use_grad_for_differentiable, _default_to_fused_or_foreach has_recent_pt = True except ImportError: has_recent_pt = False from typing import List, Optional __all__ = ['SGDW', 'sgdw'] class SGDW(Optimizer): def __init__( self, params, lr=1e-3, momentum=0, dampening=0, weight_decay=0, nesterov=False, *, maximize: bool = False, foreach: Optional[bool] = None, differentiable: bool = False, ): if lr < 0.0: raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: raise ValueError(f"Invalid weight_decay value: {weight_decay}") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, maximize=maximize, foreach=foreach, differentiable=differentiable) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) group.setdefault('maximize', False) group.setdefault('foreach', None) group.setdefault('differentiable', False) def _init_group(self, group, params_with_grad, d_p_list, momentum_buffer_list): has_sparse_grad = False for p in group['params']: if p.grad is not None: params_with_grad.append(p) d_p_list.append(p.grad) if p.grad.is_sparse: has_sparse_grad = True state = self.state[p] if 'momentum_buffer' not in state: momentum_buffer_list.append(None) else: momentum_buffer_list.append(state['momentum_buffer']) return has_sparse_grad # FIXME figure out how to make _use_grad_for_differentiable interchangeable with no_grad decorator # without args, for backwards compatibility with old pytorch @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] d_p_list = [] momentum_buffer_list = [] has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list) sgdw( params_with_grad, d_p_list, momentum_buffer_list, weight_decay=group['weight_decay'], momentum=group['momentum'], lr=group['lr'], dampening=group['dampening'], nesterov=group['nesterov'], maximize=group['maximize'], has_sparse_grad=has_sparse_grad, foreach=group['foreach'], ) # update momentum_buffers in state for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list): state = self.state[p] state['momentum_buffer'] = momentum_buffer return loss def sgdw( params: List[Tensor], d_p_list: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim has_sparse_grad: bool = None, foreach: Optional[bool] = None, *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool ): r"""Functional API that performs SGD algorithm computation. See :class:`~torch.optim.SGD` for details. 
""" if has_recent_pt and hasattr(Optimizer, '_group_tensors_by_device_and_dtype'): if foreach is None: # why must we be explicit about an if statement for torch.jit.is_scripting here? # because JIT can't handle Optionals nor fancy conditionals when scripting if not torch.jit.is_scripting(): _, foreach = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False) else: foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') else: foreach = False # disabling altogether for older pytorch, as using _group_tensors_by_device_and_dtype if foreach and not torch.jit.is_scripting(): func = _multi_tensor_sgdw else: func = _single_tensor_sgdw func( params, d_p_list, momentum_buffer_list, weight_decay=weight_decay, momentum=momentum, lr=lr, dampening=dampening, nesterov=nesterov, has_sparse_grad=has_sparse_grad, maximize=maximize, ) def _single_tensor_sgdw( params: List[Tensor], d_p_list: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool, has_sparse_grad: bool ): for i, param in enumerate(params): d_p = d_p_list[i] if not maximize else -d_p_list[i] param.mul_(1. - lr * weight_decay) if momentum != 0: buf = momentum_buffer_list[i] if buf is None: buf = torch.clone(d_p).detach() momentum_buffer_list[i] = buf else: buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(buf, alpha=momentum) else: d_p = buf param.add_(d_p, alpha=-lr) def _multi_tensor_sgdw( params: List[Tensor], grads: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool, has_sparse_grad: bool ): if len(params) == 0: return grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( [params, grads, momentum_buffer_list], with_indices=True) for ((device_params, device_grads, device_momentum_buffer_list), indices) in grouped_tensors.values(): device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads) if maximize: device_grads = torch._foreach_neg(device_grads) torch._foreach_mul_(params, 1. - lr * weight_decay) if momentum != 0: bufs = [] all_states_with_momentum_buffer = True for i in range(len(device_momentum_buffer_list)): if device_momentum_buffer_list[i] is None: all_states_with_momentum_buffer = False break else: bufs.append(device_momentum_buffer_list[i]) if all_states_with_momentum_buffer: torch._foreach_mul_(bufs, momentum) torch._foreach_add_(bufs, device_grads, alpha=1 - dampening) else: bufs = [] for i in range(len(device_momentum_buffer_list)): if device_momentum_buffer_list[i] is None: buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = \ torch.clone(device_grads[i]).detach() else: buf = device_momentum_buffer_list[i] buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening) bufs.append(buf) if nesterov: torch._foreach_add_(device_grads, bufs, alpha=momentum) else: device_grads = bufs if not device_has_sparse_grad: torch._foreach_add_(device_params, device_grads, alpha=-lr) else: # foreach APIs don't support sparse for i in range(len(device_params)): device_params[i].add_(device_grads[i], alpha=-lr)
pytorch-image-models/timm/optim/sgdw.py/0
{ "file_path": "pytorch-image-models/timm/optim/sgdw.py", "repo_id": "pytorch-image-models", "token_count": 4501 }
181
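SGDW applies weight decay directly to the parameters (decoupled, AdamW-style) rather than folding it into the gradient as plain SGD does. A small usage sketch with made-up dimensions and hyperparameters:

```python
import torch
import torch.nn as nn
from timm.optim.sgdw import SGDW

model = nn.Linear(16, 4)
# Decoupled decay: weights are multiplied by (1 - lr * weight_decay) each step
# instead of the decay term being added to the gradient.
optimizer = SGDW(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4, nesterov=True)

x, y = torch.randn(8, 16), torch.randn(8, 4)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```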
""" Distributed training/validation utils Hacked together by / Copyright 2020 Ross Wightman """ import os import torch from torch import distributed as dist try: import horovod.torch as hvd except ImportError: hvd = None from .model import unwrap_model def reduce_tensor(tensor, n): rt = tensor.clone() dist.all_reduce(rt, op=dist.ReduceOp.SUM) rt /= n return rt def distribute_bn(model, world_size, reduce=False): # ensure every node has the same running bn stats for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): if ('running_mean' in bn_name) or ('running_var' in bn_name): if reduce: # average bn stats across whole group torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) bn_buf /= float(world_size) else: # broadcast bn stats from rank 0 to whole group torch.distributed.broadcast(bn_buf, 0) def is_global_primary(args): return args.rank == 0 def is_local_primary(args): return args.local_rank == 0 def is_primary(args, local=False): return is_local_primary(args) if local else is_global_primary(args) def is_distributed_env(): if 'WORLD_SIZE' in os.environ: return int(os.environ['WORLD_SIZE']) > 1 if 'SLURM_NTASKS' in os.environ: return int(os.environ['SLURM_NTASKS']) > 1 return False def world_info_from_env(): local_rank = 0 for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'): if v in os.environ: local_rank = int(os.environ[v]) break global_rank = 0 for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'): if v in os.environ: global_rank = int(os.environ[v]) break world_size = 1 for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'): if v in os.environ: world_size = int(os.environ[v]) break return local_rank, global_rank, world_size def init_distributed_device(args): # Distributed training = training on more than one GPU. # Works in both single and multi-node scenarios. args.distributed = False args.world_size = 1 args.rank = 0 # global rank args.local_rank = 0 # TBD, support horovod? # if args.horovod: # assert hvd is not None, "Horovod is not installed" # hvd.init() # args.local_rank = int(hvd.local_rank()) # args.rank = hvd.rank() # args.world_size = hvd.size() # args.distributed = True # os.environ['LOCAL_RANK'] = str(args.local_rank) # os.environ['RANK'] = str(args.rank) # os.environ['WORLD_SIZE'] = str(args.world_size) dist_backend = getattr(args, 'dist_backend', 'nccl') dist_url = getattr(args, 'dist_url', 'env://') if is_distributed_env(): if 'SLURM_PROCID' in os.environ: # DDP via SLURM args.local_rank, args.rank, args.world_size = world_info_from_env() # SLURM var -> torch.distributed vars in case needed os.environ['LOCAL_RANK'] = str(args.local_rank) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) torch.distributed.init_process_group( backend=dist_backend, init_method=dist_url, world_size=args.world_size, rank=args.rank, ) else: # DDP via torchrun, torch.distributed.launch args.local_rank, _, _ = world_info_from_env() torch.distributed.init_process_group( backend=dist_backend, init_method=dist_url, ) args.world_size = torch.distributed.get_world_size() args.rank = torch.distributed.get_rank() args.distributed = True if torch.cuda.is_available(): if args.distributed: device = 'cuda:%d' % args.local_rank else: device = 'cuda:0' torch.cuda.set_device(device) else: device = 'cpu' args.device = device device = torch.device(device) return device
pytorch-image-models/timm/utils/distributed.py/0
{ "file_path": "pytorch-image-models/timm/utils/distributed.py", "repo_id": "pytorch-image-models", "token_count": 2013 }
182
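`init_distributed_device` mutates the args object in place and falls back to a single-process setup when no distributed environment variables are present. A minimal sketch using a `SimpleNamespace` stand-in for the usual argparse namespace:

```python
from types import SimpleNamespace
from timm.utils.distributed import init_distributed_device, is_primary

# Minimal stand-in for the usual argparse namespace; the helper fills in
# args.distributed, args.world_size, args.rank, args.local_rank and args.device.
args = SimpleNamespace(dist_backend='nccl', dist_url='env://')
device = init_distributed_device(args)

if is_primary(args):
    print(f'rank {args.rank}/{args.world_size} on {device}, distributed={args.distributed}')
```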
<div align="center"> # Text Generation Inference benchmarking tool ![benchmark](../assets/benchmark.png) </div> A lightweight benchmarking tool based inspired by [oha](https://github.com/hatoo/oha) and powered by [tui](https://github.com/tui-rs-revival/ratatui). ## Install ```shell make install-benchmark ``` ## Run First, start `text-generation-inference`: ```shell text-generation-launcher --model-id bigscience/bloom-560m ``` Then run the benchmarking tool: ```shell text-generation-benchmark --tokenizer-name bigscience/bloom-560m ```

text-generation-inference/benchmark/README.md/0
{ "file_path": "text-generation-inference/benchmark/README.md", "repo_id": "text-generation-inference", "token_count": 189 }
183
import pytest from text_generation import ( InferenceAPIClient, InferenceAPIAsyncClient, Client, AsyncClient, ) from text_generation.errors import NotSupportedError, NotFoundError from text_generation.inference_api import check_model_support, deployed_models def test_check_model_support(flan_t5_xxl, unsupported_model, fake_model): assert check_model_support(flan_t5_xxl) assert not check_model_support(unsupported_model) with pytest.raises(NotFoundError): check_model_support(fake_model) def test_deployed_models(): deployed_models() def test_client(flan_t5_xxl): client = InferenceAPIClient(flan_t5_xxl) assert isinstance(client, Client) def test_client_unsupported_model(unsupported_model): with pytest.raises(NotSupportedError): InferenceAPIClient(unsupported_model) def test_async_client(flan_t5_xxl): client = InferenceAPIAsyncClient(flan_t5_xxl) assert isinstance(client, AsyncClient) def test_async_client_unsupported_model(unsupported_model): with pytest.raises(NotSupportedError): InferenceAPIAsyncClient(unsupported_model)
text-generation-inference/clients/python/tests/test_inference_api.py/0
{ "file_path": "text-generation-inference/clients/python/tests/test_inference_api.py", "repo_id": "text-generation-inference", "token_count": 411 }
184
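The tests above cover the Inference API wrappers; when pointing the plain `Client` at a locally running text-generation-inference server instead, usage looks roughly like the sketch below. The URL and prompts are placeholders, and the response attribute names follow my reading of the client's response types.

```python
from text_generation import Client

# Assumes a text-generation-inference server is already running locally,
# e.g. started with: text-generation-launcher --model-id bigscience/bloom-560m
client = Client("http://127.0.0.1:8080")

response = client.generate("What is Deep Learning?", max_new_tokens=10)
print(response.generated_text)

# Streaming variant: tokens are yielded one at a time.
text = ""
for stream_response in client.generate_stream("What is Deep Learning?", max_new_tokens=10):
    if not stream_response.token.special:
        text += stream_response.token.text
print(text)
```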
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.5390625, "text": " dég" }, { "id": 21543, "logprob": -0.14758301, "text": "uster" }, { "id": 447, "logprob": -1.9296875, "text": " un" }, { "id": 46341, "logprob": -15.4453125, "text": " ort" }, { "id": 35567, "logprob": -7.59375, "text": "olan" }, { "id": 15, "logprob": -1.3994141, "text": "," }, { "id": 1669, "logprob": -1.578125, "text": " il" }, { "id": 11580, "logprob": -0.9453125, "text": " faut" }, { "id": 3913, "logprob": -3.7011719, "text": " tout" }, { "id": 39261, "logprob": -1.5732422, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7529297, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.6054688, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5283203, "special": false, "text": " cu" }, { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.4716797, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11853027, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.41210938, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.0037765503, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0166016, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, 
"logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 17934, "logprob": null, "text": "Pour" }, { "id": 49833, "logprob": -10.515625, "text": " dég" }, { "id": 21543, "logprob": -0.1484375, "text": "uster" }, { "id": 447, "logprob": -1.9287109, "text": " un" }, { "id": 46341, "logprob": -15.34375, "text": " ort" }, { "id": 35567, "logprob": -7.515625, "text": "olan" }, { "id": 15, "logprob": -1.4199219, "text": "," }, { "id": 1669, "logprob": -1.5664062, "text": " il" }, { "id": 11580, "logprob": -0.94091797, "text": " faut" }, { "id": 3913, "logprob": -3.6660156, "text": " tout" }, { "id": 39261, "logprob": -1.7753906, "text": " d'abord" } ], "seed": null, "tokens": [ { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" }, { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" }, { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" }, { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" }, { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" }, { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" }, { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" }, { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" }, { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" }, { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" } ] }, "generated_text": " le faire cuire dans de l'eau bouillante sal" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json", "repo_id": "text-generation-inference", "token_count": 7258 }
185
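The snapshot above stores four responses from a sharded BLOOM-560m load test; each entry records the prefill tokens, the ten generated tokens with their log-probabilities, and the final generated_text. Below is a minimal sketch, assuming the snapshot file is available locally, of how such a file could be sanity-checked offline; this check is not part of the test suite.

import json

# Hypothetical offline check: every response in the load-test snapshot should
# report the same generated_text and a token list matching generated_tokens.
with open("test_bloom_560m_sharded_load.json") as f:  # assumed local copy of the snapshot
    responses = json.load(f)

texts = [r["generated_text"] for r in responses]
assert len(responses) == 4
assert all(t == texts[0] for t in texts)
assert all(r["details"]["generated_tokens"] == len(r["details"]["tokens"]) for r in responses)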
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2753906, "text": "Learning" }, { "id": 29973, "logprob": -0.48046875, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1845703, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.5727539, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.00010967255, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.04510498, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.00020992756, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.0046539307, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025844574, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2724609, "text": "Learning" }, { "id": 29973, "logprob": -0.47729492, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2724609, "text": "Learning" }, { "id": 29973, "logprob": -0.47729492, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2724609, "text": "Learning" }, { "id": 29973, "logprob": -0.47729492, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json", "repo_id": "text-generation-inference", "token_count": 5726 }
186
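Each generated token in these snapshots carries its log-probability, so the sequence log-likelihood of a completion can be recovered by summing them. A small sketch using only the field names visible in the snapshot above (nothing here is part of the repository):

import json
import math

with open("test_flash_medusa_load.json") as f:  # assumed local copy of the snapshot
    responses = json.load(f)

for r in responses:
    logprobs = [t["logprob"] for t in r["details"]["tokens"]]
    total = sum(logprobs)                          # log-likelihood of the generated continuation
    ppl = math.exp(-total / len(logprobs))         # per-token perplexity of the completion
    print(r["generated_text"], round(total, 4), round(ppl, 4))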
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 
1459, "logprob": -5.6289062, "text": " print" }, { "id": 81, "logprob": -1.6005859, "text": "_" }, { "id": 7656, "logprob": -5.9921875, "text": "hello" } ], "seed": null, "tokens": [ { "id": 2262, "logprob": -0.7705078, "special": false, "text": "():" }, { "id": 284, "logprob": -0.2602539, "special": false, "text": "\n " }, { "id": 1459, "logprob": -0.39282227, "special": false, "text": " print" }, { "id": 440, "logprob": -0.6113281, "special": false, "text": "(\"" }, { "id": 8279, "logprob": -0.4765625, "special": false, "text": "Hello" }, { "id": 10896, "logprob": -1.5068359, "special": false, "text": " World" }, { "id": 657, "logprob": -0.8154297, "special": false, "text": "\")" }, { "id": 203, "logprob": -0.7319336, "special": false, "text": "\n" }, { "id": 203, "logprob": -0.35229492, "special": false, "text": "\n" }, { "id": 589, "logprob": -1.0380859, "special": false, "text": "def" } ] }, "generated_text": "():\n print(\"Hello World\")\n\ndef" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_load.json", "repo_id": "text-generation-inference", "token_count": 5176 }
187
import pytest


@pytest.fixture(scope="module")
def idefics_handle(launcher):
    with launcher(
        "HuggingFaceM4/idefics-9b-instruct", num_shard=2, dtype="float16"
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def idefics(idefics_handle):
    await idefics_handle.health(300)
    return idefics_handle.client


@pytest.mark.asyncio
async def test_idefics(idefics, response_snapshot):
    response = await idefics.generate(
        "User:![](https://temp-5681.s3.us-west-2.amazonaws.com/chicken_on_money.png)Can you tell me a very short story based on the image?",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_idefics_load(idefics, generate_load, response_snapshot):
    responses = await generate_load(
        idefics,
        "User:![](https://temp-5681.s3.us-west-2.amazonaws.com/chicken_on_money.png)Can you tell me a very short story based on the image?",
        max_new_tokens=10,
        n=4,
    )

    generated_texts = [r.generated_text for r in responses]

    assert len(generated_texts) == 4
    # All four concurrent requests should produce identical text.
    assert all(text == generated_texts[0] for text in generated_texts)

    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_idefics.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_idefics.py", "repo_id": "text-generation-inference", "token_count": 552 }
188
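The test above relies on the launcher, generate_load and response_snapshot fixtures from the integration-test conftest. For context only, a rough sketch of issuing the same request directly with the text_generation Python client, assuming such a client is installed and a server is already listening on a local port; this is not part of the test suite.

import asyncio
from text_generation import AsyncClient

async def main():
    client = AsyncClient("http://127.0.0.1:8080")  # assumed local TGI endpoint
    response = await client.generate(
        "User:![](https://temp-5681.s3.us-west-2.amazonaws.com/chicken_on_money.png)"
        "Can you tell me a very short story based on the image?",
        max_new_tokens=10,
        decoder_input_details=True,
    )
    print(response.details.generated_tokens, response.generated_text)

asyncio.run(main())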
import { get_options, run } from "./common.js";

const reference_latency_ms = 70;
const host = __ENV.HOST || '127.0.0.1:8000';
const max_new_tokens = 50;


function generate_payload(gpt){
    const input = gpt["conversations"][0]["value"];
    return {"inputs": input, "parameters": {"max_new_tokens": max_new_tokens, "decoder_input_details": true}}
}

export const options = get_options(reference_latency_ms);

export default function(){
    run(host, generate_payload, max_new_tokens);
}
text-generation-inference/load_tests/tgi.js/0
{ "file_path": "text-generation-inference/load_tests/tgi.js", "repo_id": "text-generation-inference", "token_count": 185 }
189
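The k6 script above posts the JSON body expected by the router's /generate endpoint. A rough Python equivalent of a single request, assuming a server on the same host/port used by the script; the prompt string stands in for the dataset record the script reads.

import requests

payload = {
    "inputs": "What is Deep Learning?",  # placeholder for gpt["conversations"][0]["value"]
    "parameters": {"max_new_tokens": 50, "decoder_input_details": True},
}
resp = requests.post("http://127.0.0.1:8000/generate", json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["generated_text"])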
mod health; /// Text Generation Inference Webserver mod infer; mod queue; pub mod server; mod validation; use infer::{Infer, InferError, InferStreamResponse}; use queue::{Entry, Queue}; use serde::{Deserialize, Serialize}; use tokio::sync::OwnedSemaphorePermit; use tokio_stream::wrappers::UnboundedReceiverStream; use utoipa::ToSchema; use validation::Validation; /// Type alias for generation responses pub(crate) type GenerateStreamResponse = ( OwnedSemaphorePermit, u32, // input_length UnboundedReceiverStream<Result<InferStreamResponse, InferError>>, ); /// Hub type #[derive(Clone, Debug, Deserialize)] pub struct HubModelInfo { #[serde(rename(deserialize = "id"))] pub model_id: String, pub sha: Option<String>, pub pipeline_tag: Option<String>, } #[derive(Clone, Deserialize, Default)] pub struct HubTokenizerConfig { pub chat_template: Option<String>, pub bos_token: Option<String>, pub eos_token: Option<String>, } impl HubTokenizerConfig { pub fn from_file(filename: &std::path::Path) -> Self { let content = std::fs::read_to_string(filename).unwrap(); serde_json::from_str(&content).unwrap_or_default() } } #[derive(Clone, Debug, Serialize, ToSchema)] pub struct Info { /// Model info #[schema(example = "bigscience/blomm-560m")] pub model_id: String, #[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")] pub model_sha: Option<String>, #[schema(example = "torch.float16")] pub model_dtype: String, #[schema(example = "cuda")] pub model_device_type: String, #[schema(nullable = true, example = "text-generation")] pub model_pipeline_tag: Option<String>, /// Router Parameters #[schema(example = "128")] pub max_concurrent_requests: usize, #[schema(example = "2")] pub max_best_of: usize, #[schema(example = "4")] pub max_stop_sequences: usize, #[schema(example = "1024")] pub max_input_length: usize, #[schema(example = "2048")] pub max_total_tokens: usize, #[schema(example = "1.2")] pub waiting_served_ratio: f32, #[schema(example = "32000")] pub max_batch_total_tokens: u32, #[schema(example = "20")] pub max_waiting_tokens: usize, #[schema(example = "2")] pub validation_workers: usize, /// Router Info #[schema(example = "0.5.0")] pub version: &'static str, #[schema(nullable = true, example = "null")] pub sha: Option<&'static str>, #[schema(nullable = true, example = "null")] pub docker_label: Option<&'static str>, } #[derive(Clone, Debug, Deserialize, ToSchema)] pub(crate) struct GenerateParameters { #[serde(default)] #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)] pub best_of: Option<usize>, #[serde(default)] #[schema( exclusive_minimum = 0.0, nullable = true, default = "null", example = 0.5 )] pub temperature: Option<f32>, #[serde(default)] #[schema( exclusive_minimum = 0.0, nullable = true, default = "null", example = 1.03 )] pub repetition_penalty: Option<f32>, #[serde(default)] #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)] pub top_k: Option<i32>, #[serde(default)] #[schema( exclusive_minimum = 0.0, maximum = 1.0, nullable = true, default = "null", example = 0.95 )] pub top_p: Option<f32>, #[serde(default)] #[schema( exclusive_minimum = 0.0, maximum = 1.0, nullable = true, default = "null", example = 0.95 )] pub typical_p: Option<f32>, #[serde(default)] #[schema(default = "false", example = true)] pub do_sample: bool, #[serde(default = "default_max_new_tokens")] #[schema(nullable = true, default = "100", example = "20")] pub max_new_tokens: Option<u32>, #[serde(default)] #[schema(nullable = true, default = 
"null", example = false)] pub return_full_text: Option<bool>, #[serde(default)] #[schema(inline, max_items = 4, example = json ! (["photographer"]))] pub stop: Vec<String>, #[serde(default)] #[schema(nullable = true, default = "null", example = "null")] pub truncate: Option<usize>, #[serde(default)] #[schema(default = "false", example = true)] pub watermark: bool, #[serde(default)] #[schema(default = "true")] pub details: bool, #[serde(default)] #[schema(default = "true")] pub decoder_input_details: bool, #[serde(default)] #[schema( exclusive_minimum = 0, nullable = true, default = "null", example = "null" )] pub seed: Option<u64>, #[serde(default)] #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)] pub top_n_tokens: Option<u32>, } fn default_max_new_tokens() -> Option<u32> { Some(100) } fn default_parameters() -> GenerateParameters { GenerateParameters { best_of: None, temperature: None, repetition_penalty: None, top_k: None, top_p: None, typical_p: None, do_sample: true, max_new_tokens: default_max_new_tokens(), return_full_text: None, stop: Vec::new(), truncate: None, watermark: false, details: false, decoder_input_details: false, seed: None, top_n_tokens: None, } } #[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletion { pub id: String, pub object: String, #[schema(example = "1706270835")] pub created: u64, #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] pub model: String, pub system_fingerprint: String, pub choices: Vec<ChatCompletionComplete>, pub usage: Usage, } #[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionComplete { pub index: u32, pub message: Message, pub logprobs: Option<Vec<f32>>, pub finish_reason: String, } #[derive(Clone, Deserialize, Serialize)] pub(crate) struct Usage { pub prompt_tokens: u32, pub completion_tokens: u32, pub total_tokens: u32, } impl ChatCompletion { pub(crate) fn new( model: String, system_fingerprint: String, output: String, created: u64, details: Details, return_logprobs: bool, ) -> Self { Self { id: String::new(), object: "text_completion".into(), created, model, system_fingerprint, choices: vec![ChatCompletionComplete { index: 0, message: Message { role: "assistant".into(), content: output, }, logprobs: return_logprobs .then(|| details.tokens.iter().map(|t| t.logprob).collect()), finish_reason: details.finish_reason.to_string(), }], usage: Usage { prompt_tokens: details.prefill.len() as u32, completion_tokens: details.generated_tokens, total_tokens: details.prefill.len() as u32 + details.generated_tokens, }, } } } #[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionChunk { pub id: String, pub object: String, #[schema(example = "1706270978")] pub created: u64, #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] pub model: String, pub system_fingerprint: String, pub choices: Vec<ChatCompletionChoice>, } #[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionChoice { pub index: u32, pub delta: ChatCompletionDelta, pub logprobs: Option<f32>, pub finish_reason: Option<String>, } #[derive(Clone, Debug, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionDelta { #[schema(example = "user")] pub role: String, #[schema(example = "What is Deep Learning?")] pub content: String, } impl ChatCompletionChunk { pub(crate) fn new( model: String, system_fingerprint: String, delta: String, created: u64, index: u32, logprobs: Option<f32>, finish_reason: Option<String>, ) -> Self { Self { 
id: String::new(), object: "text_completion".to_string(), created, model, system_fingerprint, choices: vec![ChatCompletionChoice { index, delta: ChatCompletionDelta { role: "assistant".to_string(), content: delta, }, logprobs, finish_reason, }], } } } fn default_request_messages() -> Vec<Message> { vec![Message { role: "user".to_string(), content: "My name is David and I".to_string(), }] } #[derive(Clone, Deserialize, ToSchema, Serialize)] pub(crate) struct ChatRequest { /// UNUSED #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. pub model: String, /* NOTE: UNUSED */ /// A list of messages comprising the conversation so far. #[serde(default = "default_request_messages")] pub messages: Vec<Message>, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, /// decreasing the model's likelihood to repeat the same line verbatim. #[serde(default)] #[schema(example = "1.0")] pub frequency_penalty: Option<f32>, /// UNUSED /// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens /// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, /// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, /// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should /// result in a ban or exclusive selection of the relevant token. #[serde(default)] pub logit_bias: Option<Vec<f32>>, /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each /// output token returned in the content of message. #[serde(default)] #[schema(example = "false")] pub logprobs: Option<bool>, /// UNUSED /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with /// an associated log probability. logprobs must be set to true if this parameter is used. #[serde(default)] #[schema(example = "5")] pub top_logprobs: Option<u32>, /// The maximum number of tokens that can be generated in the chat completion. #[serde(default)] #[schema(example = "32")] pub max_tokens: Option<u32>, /// UNUSED /// How many chat completion choices to generate for each input message. Note that you will be charged based on the /// number of generated tokens across all of the choices. Keep n as 1 to minimize costs. #[serde(default)] #[schema(nullable = true, example = "2")] pub n: Option<u32>, /// UNUSED /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, /// increasing the model's likelihood to talk about new topics #[serde(default)] #[schema(nullable = true, example = 0.1)] pub presence_penalty: Option<f32>, #[serde(default = "bool::default")] pub stream: bool, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while /// lower values like 0.2 will make it more focused and deterministic. /// /// We generally recommend altering this or `top_p` but not both. 
#[serde(default)] #[schema(nullable = true, example = 1.0)] pub temperature: Option<f32>, /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. #[serde(default)] #[schema(nullable = true, example = 0.95)] pub top_p: Option<f32>, } #[derive(Clone, Serialize, Deserialize)] pub(crate) struct ChatTemplateInputs<'a> { messages: Vec<Message>, bos_token: Option<&'a str>, eos_token: Option<&'a str>, } #[derive(Clone, Deserialize, ToSchema, Serialize)] pub(crate) struct Message { #[schema(example = "user")] pub role: String, #[schema(example = "My name is David and I")] pub content: String, } #[derive(Clone, Debug, Deserialize, ToSchema)] pub(crate) struct GenerateRequest { #[schema(example = "My name is Olivier and I")] pub inputs: String, #[serde(default = "default_parameters")] pub parameters: GenerateParameters, } #[derive(Clone, Debug, Deserialize, ToSchema)] pub(crate) struct CompatGenerateRequest { #[schema(example = "My name is Olivier and I")] pub inputs: String, #[serde(default = "default_parameters")] pub parameters: GenerateParameters, #[serde(default)] #[schema(default = "false")] pub stream: bool, } impl From<CompatGenerateRequest> for GenerateRequest { fn from(req: CompatGenerateRequest) -> Self { Self { inputs: req.inputs, parameters: req.parameters, } } } #[derive(Debug, Serialize, ToSchema)] pub struct PrefillToken { #[schema(example = 0)] id: u32, #[schema(example = "test")] text: String, #[schema(nullable = true, example = - 0.34)] logprob: f32, } #[derive(Debug, Serialize, ToSchema)] pub struct Token { #[schema(example = 0)] id: u32, #[schema(example = "test")] text: String, #[schema(nullable = true, example = - 0.34)] logprob: f32, #[schema(example = "false")] special: bool, } #[derive(Debug, Serialize, ToSchema)] pub struct SimpleToken { #[schema(example = 0)] id: u32, #[schema(example = "test")] text: String, #[schema(example = 0)] start: usize, #[schema(example = 2)] stop: usize, } #[derive(Serialize, ToSchema)] #[serde(rename_all(serialize = "snake_case"))] #[schema(example = "Length")] pub(crate) enum FinishReason { #[schema(rename = "length")] Length, #[serde(rename = "eos_token")] #[schema(rename = "eos_token")] EndOfSequenceToken, #[schema(rename = "stop_sequence")] StopSequence, } impl std::fmt::Display for FinishReason { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { FinishReason::Length => write!(f, "length"), FinishReason::EndOfSequenceToken => write!(f, "eos_token"), FinishReason::StopSequence => write!(f, "stop_sequence"), } } } #[derive(Serialize, ToSchema)] pub(crate) struct BestOfSequence { #[schema(example = "test")] pub generated_text: String, #[schema(example = "length")] pub finish_reason: FinishReason, #[schema(example = 1)] pub generated_tokens: u32, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, pub prefill: Vec<PrefillToken>, pub tokens: Vec<Token>, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec<Vec<Token>>, } #[derive(Serialize, ToSchema)] pub(crate) struct Details { #[schema(example = "length")] pub finish_reason: FinishReason, #[schema(example = 1)] pub generated_tokens: u32, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, pub prefill: Vec<PrefillToken>, pub tokens: Vec<Token>, #[serde(skip_serializing_if = "Option::is_none")] pub best_of_sequences: 
Option<Vec<BestOfSequence>>, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec<Vec<Token>>, } #[derive(Serialize, ToSchema)] pub(crate) struct GenerateResponse { #[schema(example = "test")] pub generated_text: String, #[serde(skip_serializing_if = "Option::is_none")] pub details: Option<Details>, } #[derive(Serialize, ToSchema)] #[serde(transparent)] pub(crate) struct TokenizeResponse(Vec<SimpleToken>); #[derive(Serialize, ToSchema)] pub(crate) struct StreamDetails { #[schema(example = "length")] pub finish_reason: FinishReason, #[schema(example = 1)] pub generated_tokens: u32, #[schema(nullable = true, example = 42)] pub seed: Option<u64>, } #[derive(Serialize, ToSchema)] pub(crate) struct StreamResponse { pub index: u32, pub token: Token, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec<Token>, #[schema(nullable = true, default = "null", example = "test")] pub generated_text: Option<String>, #[schema(nullable = true, default = "null")] pub details: Option<StreamDetails>, } #[derive(Serialize, ToSchema)] pub(crate) struct ErrorResponse { pub error: String, pub error_type: String, } #[cfg(test)] mod tests { use tokenizers::Tokenizer; pub(crate) async fn get_tokenizer() -> Tokenizer { let api = hf_hub::api::sync::Api::new().unwrap(); let repo = api.model("gpt2".to_string()); let filename = repo.get("tokenizer.json").unwrap(); Tokenizer::from_file(filename).unwrap() } }
text-generation-inference/router/src/lib.rs/0
{ "file_path": "text-generation-inference/router/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 7411 }
190
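The GenerateRequest and GenerateParameters structs above define the request schema the router deserializes. As an illustration only, a JSON body consistent with those field names and their documented ranges (the values themselves are arbitrary examples):

import json

request = {
    "inputs": "My name is Olivier and I",
    "parameters": {
        "best_of": 1,
        "temperature": 0.5,
        "repetition_penalty": 1.03,
        "top_k": 10,
        "top_p": 0.95,
        "do_sample": True,
        "max_new_tokens": 20,
        "stop": ["photographer"],
        "details": True,
        "decoder_input_details": True,
        "seed": 42,
        "top_n_tokens": 5,
    },
}
print(json.dumps(request, indent=2))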
#include <ATen/Dispatch.h> #include <THC/THCAtomics.cuh> #include <ATen/ATen.h> #include <torch/torch.h> #include <vector> #include <optional> /** * Friendly reminder of how multithreading works in CUDA: https://developer.nvidia.com/blog/even-easier-introduction-cuda * Check example at https://github.com/thomasw21/LinearTransformers/blob/main/model/attention/fast_weight/fast_weight_cuda.cu **/ // Available in pytorch main //#define DISPATCH_CASE_FLOATING_TYPES(...) \ // at::AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \ /* * Forward passes */ /** * cast to fp32 if in fp16 + mask + softmax computation in fp32 + cast back to original dtype **/ template<typename attention_scores_scalar, int64_t min_kv_length_shard_size_per_thread> __global__ void forward_masked_softmax_kernel( const torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> attention_scores, // [B, KV] const torch::PackedTensorAccessor32<bool, 2, torch::RestrictPtrTraits> mask, // [B, KV] torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> result, // [B, KV] const int64_t effective_kv_length, const dim3 blockDim, const int64_t rows_per_block, const int64_t kv_length, const int64_t batch_size ) { const auto row_id = threadIdx.x / effective_kv_length; const auto effective_kv_length_id = threadIdx.x % effective_kv_length; const auto kv_length_start = effective_kv_length_id * min_kv_length_shard_size_per_thread; auto kv_length_end_ = (effective_kv_length_id + 1) * min_kv_length_shard_size_per_thread; kv_length_end_ = (kv_length_end_ > kv_length) ? kv_length : kv_length_end_; const auto kv_length_end = kv_length_end_; const auto batch_id = blockIdx.x * rows_per_block + row_id; // We need 2 float storage for each row, one for max computation, the other for normalizing exponential extern __shared__ float temp_storage[]; const auto row_id_mem_offset = row_id * 2; if (effective_kv_length_id == 0) { temp_storage[row_id_mem_offset] = -std::numeric_limits<float>::infinity(); temp_storage[row_id_mem_offset + 1] = 0; } __syncthreads(); // Compute mask and max if (batch_id < batch_size) { float thread_max = -std::numeric_limits<float>::infinity(); for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { if (mask[batch_id][kv_length_id] == 0) { const float candidate = attention_scores[batch_id][kv_length_id]; thread_max = (thread_max < candidate) ? 
candidate : thread_max; } } if (thread_max != -std::numeric_limits<float>::infinity()) { // TODO @thomasw21 with more memory we can probably compute a much faster `max-reduce` in parallel O(ln(n)) operations in each memory slot gpuAtomicMax(&temp_storage[row_id_mem_offset], thread_max); } } __syncthreads(); // Compute exp(elt - max) masked float exponential[min_kv_length_shard_size_per_thread]; if (batch_id < batch_size) { float thread_add = 0; for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { if (mask[batch_id][kv_length_id] == 0) { exponential[kv_length_id - kv_length_start] = std::exp(static_cast<float>(attention_scores[batch_id][kv_length_id]) - temp_storage[row_id_mem_offset]); thread_add = thread_add + exponential[kv_length_id - kv_length_start]; } else { exponential[kv_length_id - kv_length_start] = 0.; } } if (thread_add > 0) { // TODO @thomasw21 with more memory we can probably compute a much faster `sum-reduce` in parallel O(ln(n)) operations in each memory slot gpuAtomicAdd(&temp_storage[row_id_mem_offset + 1], thread_add); } } __syncthreads(); // Compute softmax if (batch_id < batch_size) { // If sum of all exponential is 0, we set the softmax values to 0 if (temp_storage[row_id_mem_offset + 1] == 0.) { for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { result[batch_id][kv_length_id] = 0.; } } else { for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { result[batch_id][kv_length_id] = static_cast<attention_scores_scalar>(exponential[kv_length_id - kv_length_start] / temp_storage[row_id_mem_offset + 1]); } } } } #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) std::tuple<at::Tensor, std::optional<std::vector<at::Tensor>>, at::Tensor> forward( const at::Tensor fused_qkv, const std::optional<std::vector<at::Tensor>> layer_past, const at::Tensor alibi, const at::Tensor attention_mask, const std::optional<at::Tensor> head_mask, const float beta, const float inv_norm_factor, const int num_heads, const bool use_cache ) { const auto batch_size = fused_qkv.size(0); const auto q_length = fused_qkv.size(1); const auto three_times_hidden_size = fused_qkv.size(2); const auto head_dim = three_times_hidden_size / (3 * num_heads); const auto batch_size_times_num_heads = batch_size * num_heads; // `split_heads` const auto fused_qkv_view = fused_qkv.view({batch_size, q_length, num_heads, 3 * head_dim}); const auto tensor_list = fused_qkv_view.split(head_dim, -1); const auto query_layer = tensor_list[0].transpose(1, 2).reshape({batch_size_times_num_heads, q_length, head_dim}); auto key_layer = tensor_list[1].permute({0, 2, 3, 1}).reshape({batch_size_times_num_heads, head_dim, q_length}); auto value_layer = tensor_list[2].transpose(1, 2).reshape({batch_size_times_num_heads, q_length, head_dim}); if (layer_past) { const auto past_key = (*layer_past).at(0); const auto past_value = (*layer_past).at(1); key_layer = at::cat({past_key, key_layer}, 2); value_layer = at::cat({past_value, value_layer}, 1); } std::optional<std::vector<at::Tensor>> present; if (use_cache) { present = {key_layer, value_layer}; } else { present = {}; } auto attention_scores = alibi.baddbmm(query_layer, key_layer, beta, inv_norm_factor); // Computing `optionally_cast_fp16_to_fp32 + masked_fill + softmax + cast_to_intial_dtype` at::Tensor 
attention_probs; if (true) { const auto kv_length = key_layer.size(2); // TODO @thomasw21: it's easier to think of attention_scores as 2D tensors const auto attention_scores_2d = attention_scores.view({batch_size_times_num_heads * q_length, kv_length}); const auto attention_mask_2d = attention_mask.view({batch_size_times_num_heads * q_length, kv_length}); // Custom kernel attention_probs = at::empty_like(attention_scores_2d); // Check that inputs and contiguous + cuda tensors CHECK_INPUT(attention_scores_2d); CHECK_INPUT(attention_mask_2d); // TODO @thomas21: change by to this as it's cleaner when pytorch 1.13 comes out // DISPATCH_CASE_FLOATING_TYPES(attention_scores.scalar_type(), "masked_softmax", [&] { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, attention_scores.scalar_type(), "masked_softmax", [&] { /* * Understanding how GPUs work: https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/ * A100 specifications: https://images.nvidia.com/aem-dam/en-zz/Solutions/data-center/nvidia-ampere-architecture-whitepaper.pdf * - SMs: 108 * - TPCs: 56 (What's that?) * - Memory size: 40 GB * - L2 Cache size: 40960 KB (shared across all SMs) * - L1/Shared memory size: 192 KB (shared across all threads within a SM) * - Max Threads / SM: 2048 * - Max Thread Blocks / SM: 32 */ /* * We should split [batch_size_times_num_heads_block, q_length] in seperate blocks and [batch_size_times_num_heads_block_size, kv_length] a single block * with multiple threads as we need to `sync_threads` to run exponential sum. * We maximise the usage of threads within a single block */ // TODO @thomasw21 figure out everything warp related: // - why do they have to be power of 2 // TODO @thomas21 check why everyone is setting 1024 when officially it's 2048 const auto MAX_THREADS_PER_SM = 1024; // TODO @thomasw21 figure out how to have longer sequences, currently the maximum is `max_kv_length = MAX_THREADS_PER_SM * MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD` const auto MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD = 4; // `effective_kv_length = ceil(kv_length / MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD)` const auto effective_kv_length = (kv_length - 1)/ MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD + 1; const auto rows_per_block = MAX_THREADS_PER_SM / effective_kv_length; const auto num_blocks = (batch_size_times_num_heads * q_length - 1) / rows_per_block + 1; const dim3 gridDim(num_blocks); // Number of blocks that run const dim3 blockDim(MAX_THREADS_PER_SM); // Number of threads that run per block const int shared_mem_forward = rows_per_block * 2 * sizeof(float); // 192 * 2 ** 10 // const auto MAX_L1_MEMORY = 196608; // const auto MAX_SMs = 108; // TORCH_CHECK(batch_size_times_num_heads * q_length <= MAX_L1_MEMORY, "Shared memory exceeds 192KB limitation."); // TORCH_CHECK(gridDim.x * gridDim.y * gridDim.z <= MAX_SMs, "A100s only have 108 SMs. Raising as require blocks is bigger."); // TORCH_CHECK(blockDim.x * blockDim.y * blockDim.z <= MAX_THREADS_PER_SM, "A100s only have 2048 threads per block. 
Raising as require requested threads is higher."); forward_masked_softmax_kernel<scalar_t, MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD><<<gridDim, blockDim, shared_mem_forward>>>( attention_scores_2d.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), attention_mask_2d.packed_accessor32<bool, 2, torch::RestrictPtrTraits>(), attention_probs.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), effective_kv_length, blockDim, rows_per_block, kv_length, batch_size_times_num_heads * q_length ); }); attention_probs = attention_probs.view({batch_size_times_num_heads, q_length, kv_length}); } else { // Pytorch C++ API auto input_dtype = attention_scores.scalar_type(); if (input_dtype == at::ScalarType::Float) { attention_scores = attention_scores.to(at::ScalarType::Float); }; // TODO @thomasw21 Figure out how to get minimum value auto attn_weights = attention_scores.masked_fill_(attention_mask, -1e34); attention_probs = attn_weights.softmax(-1, at::ScalarType::Float).to(input_dtype); } auto context_layer = attention_probs.bmm(value_layer); // `_merge_heads` context_layer = context_layer.view({batch_size, num_heads, q_length, head_dim}); context_layer = context_layer.permute({0, 2, 1, 3}); context_layer = context_layer.reshape({batch_size, q_length, three_times_hidden_size / 3}); return std::make_tuple(context_layer, present, attention_probs); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "forward", &forward, "Bloom attention mechanism forward (CUDA)" ); }
text-generation-inference/server/custom_kernels/custom_kernels/fused_bloom_attention_cuda.cu/0
{ "file_path": "text-generation-inference/server/custom_kernels/custom_kernels/fused_bloom_attention_cuda.cu", "repo_id": "text-generation-inference", "token_count": 5344 }
191
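The custom kernel above computes a numerically stable masked softmax in fp32 (per-row max, exponentiation, sum, normalization, with fully masked rows forced to zero) before casting back to the input dtype. A PyTorch reference of that computation, useful as a mental model rather than a drop-in replacement for the kernel:

import torch

def masked_softmax_reference(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # scores: [B, KV] in fp16/bf16/fp32; mask: [B, KV] bool where True means masked out.
    s = scores.float().masked_fill(mask, float("-inf"))
    s = s - s.max(dim=-1, keepdim=True).values           # subtract the row max for stability
    e = torch.exp(s).masked_fill(mask, 0.0)               # also zeroes rows that are fully masked
    denom = e.sum(dim=-1, keepdim=True).clamp_min(1e-38)
    return (e / denom).to(scores.dtype)                   # fully masked rows come out as all zeros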
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="exllama_kernels",
    ext_modules=[
        CUDAExtension(
            name="exllama_kernels",
            sources=[
                "exllama_kernels/exllama_ext.cpp",
                "exllama_kernels/cuda_buffers.cu",
                "exllama_kernels/cuda_func/column_remap.cu",
                "exllama_kernels/cuda_func/q4_matmul.cu",
                "exllama_kernels/cuda_func/q4_matrix.cu",
            ],
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
text-generation-inference/server/exllama_kernels/setup.py/0
{ "file_path": "text-generation-inference/server/exllama_kernels/setup.py", "repo_id": "text-generation-inference", "token_count": 319 }
192
#ifndef _qdq_8_cuh
#define _qdq_8_cuh

#include "qdq_util.cuh"
#include "../../config.h"

#if QMODE_8BIT == 1

// Not implemented

#else

__forceinline__ __device__ void shuffle_8bit_4
(
    uint32_t* q,
    int stride
)
{
}

__forceinline__ __device__ void dequant_8bit_8
(
    const uint32_t q_0,
    const uint32_t q_1,
    half2 (&dq)[4],
    int stride
)
{
    half dqh[8];
    for (int i = 0; i < 4; i++) dqh[i    ] = dq_ns(exb(q_0, i * 8, 0xff), 128);
    for (int i = 0; i < 4; i++) dqh[i + 4] = dq_ns(exb(q_1, i * 8, 0xff), 128);

    for (int i = 0; i < 4; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}

#endif

#endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_8.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_8.cuh", "repo_id": "text-generation-inference", "token_count": 336 }
193
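dequant_8bit_8 above unpacks eight 8-bit values from two 32-bit words and recentres them around a zero point of 128 before converting to half precision. The exact behaviour of exb and dq_ns is inferred from their call sites, so the NumPy sketch below is an assumption about their semantics, not a transcription of them.

import numpy as np

def dequant_8bit_8(q0: int, q1: int) -> np.ndarray:
    """Unpack 8 bytes from two uint32 words and subtract an assumed zero point of 128."""
    raw = [(q0 >> (8 * i)) & 0xFF for i in range(4)] + [(q1 >> (8 * i)) & 0xFF for i in range(4)]
    return (np.array(raw, dtype=np.float32) - 128.0).astype(np.float16)

print(dequant_8bit_8(0x80818283, 0x7F7E7D7C))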
from text_generation_server.utils.hub import (
    download_weights,
    weight_hub_files,
    weight_files,
)

from text_generation_server.utils.convert import convert_files


def test_convert_files():
    model_id = "bigscience/bloom-560m"
    pt_filenames = weight_hub_files(model_id, extension=".bin")
    local_pt_files = download_weights(pt_filenames, model_id)
    local_st_files = [
        p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors" for p in local_pt_files
    ]
    convert_files(local_pt_files, local_st_files, discard_names=[])

    found_st_files = weight_files(model_id)

    assert all([p in found_st_files for p in local_st_files])
text-generation-inference/server/tests/utils/test_convert.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_convert.py", "repo_id": "text-generation-inference", "token_count": 259 }
194
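The safetensors filenames above are derived by stripping the pytorch_ prefix from each stem. Note that str.lstrip('pytorch_') removes leading characters drawn from the set {p, y, t, o, r, c, h, _} rather than the literal prefix; it happens to work here because the character following the prefix is 'm'. A small stdlib-only illustration with a hypothetical shard name, not part of the test:

from pathlib import Path

p = Path("/tmp/pytorch_model-00001-of-00002.bin")  # hypothetical downloaded shard
print(p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors")
# /tmp/model-00001-of-00002.safetensors

# str.removeprefix (Python 3.9+) expresses the same intent more directly:
print(p.parent / f"{p.stem.removeprefix('pytorch_')}.safetensors")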
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, TensorParallelHead, get_linear, FastRMSNorm, ) class MistralConfig(PretrainedConfig): model_type = "mistral" def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, sliding_window=None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() 
num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear( get_linear(weight, bias=None, quantize=config.quantize) ) class MistralAttention(torch.nn.Module): def __init__( self, prefix: str, config, weights, ): super().__init__() self.max_past = ( config.sliding_window if config.sliding_window is not None else -1 ) self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.head_size, base=config.rope_theta, device=weights.device, ) self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=False, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv paged_attention.reshape_and_cache( kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( query, torch.select(kv, dim=1, index=0), torch.select(kv, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, window_size_left=self.max_past, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class MistralMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate="tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none", ) ) # Fuse gate and up proj self.gate_up_proj = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"], weights=weights, dim=0, bias=False, ) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=False, ) self.intermediate_size = ( config.intermediate_size // weights.process_group.size() 
) def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) class MistralLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f"model.layers.{layer_id}" self.self_attn = MistralAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.mlp = MistralMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.post_attention_layernorm( attn_output, res ) mlp_output = self.mlp(normed_attn_res_output) return mlp_output, attn_res class MistralModel(torch.nn.Module): def __init__(self, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding( prefix="model.embed_tokens", weights=weights ) self.layers = nn.ModuleList( [ MistralLayer( layer_id, config, weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = FastRMSNorm.load( prefix="model.norm", weights=weights, eps=config.rms_norm_eps ) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( position_ids, true_max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashMistralForCausalLM(torch.nn.Module): def __init__(self, config, weights): super().__init__() self.model = MistralModel(config, weights) self.lm_head = TensorParallelHead.load( config, prefix="lm_head", weights=weights, ) self.max_past = config.sliding_window def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, prefill_cache_indices: 
Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values max_s = min(self.max_past, max_s) input_lengths = torch.clamp(input_lengths, max=self.max_past) hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, true_max_s, prefill_cache_indices, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py", "repo_id": "text-generation-inference", "token_count": 7491 }
195
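In the MistralAttention module above, the fused QKV projection is split into num_heads * head_size query columns and 2 * num_key_value_heads * head_size key/value columns, and kv_head_mapping assigns each query head to its grouped KV head. A small sketch of that bookkeeping for the config defaults in this file (hidden_size 4096, 32 heads, 8 KV heads), assuming a single shard:

import torch

hidden_size, num_heads, num_kv_heads = 4096, 32, 8
head_size = hidden_size // num_heads                 # 128
q_cols = head_size * num_heads                       # 4096 query columns in the fused QKV output
kv_cols = 2 * head_size * num_kv_heads               # 2048 key+value columns
num_groups = num_heads // num_kv_heads               # 4 query heads share each KV head

kv_head_mapping = torch.arange(num_kv_heads, dtype=torch.int32).repeat_interleave(num_groups)
print(q_cols, kv_cols, kv_head_mapping.tolist())
# 4096 2048 [0, 0, 0, 0, 1, 1, 1, 1, ..., 7, 7, 7, 7]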
# coding=utf-8 # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model.""" import copy import math import warnings from typing import Optional, Tuple, Union from loguru import logger import torch import torch.distributed from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS from transformers.utils import ( is_torch_fx_proxy, ) from transformers import T5Config from text_generation_server.utils.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, TensorParallelHead, ) class PartialTPEmbedding(nn.Module): def __init__(self, prefix: str, weights): super().__init__() weight = weights.get_sharded(f"{prefix}.weight", dim=1) self.weight = nn.Parameter(weight) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.embedding(input, self.weight) @torch.jit.script def layer_norm(hidden_states, weight, epsilon): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + epsilon) # convert into half-precision if necessary if weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(weight.dtype) return weight * hidden_states class T5LayerNorm(nn.Module): def __init__(self, prefix, weights, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() weight = weights.get_tensor(f"{prefix}.weight") self.weight = nn.Parameter(weight) self.variance_epsilon = torch.tensor(eps) def forward(self, hidden_states): return layer_norm(hidden_states, self.weight, self.variance_epsilon) try: from apex.normalization import FusedRMSNorm T5LayerNorm = FusedRMSNorm # noqa logger.info( "Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm" ) except ImportError: # using the normal T5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(T5LayerNorm) class T5DenseActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi", weights=weights, bias=False ) ### XXX: T5 models do not handle well both f16 and quantization. ### Overidding specifically this layer for that reason. 
### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316 ### https://github.com/huggingface/transformers/issues/20287 _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load( config, prefix=f"{prefix}.wo", weights=weights, bias=False ) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ( ACT2FN[config.dense_act_fn] if "gelu" not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) # XXX: Recasting is already done within the layer norm. # Casting back to float16 here modifies results # hidden_states = hidden_states.to(dtype=self.wo_cast[1]) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi_0 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi_0", weights=weights, bias=False ) self.wi_1 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi_1", weights=weights, bias=False ) ### XXX: T5 models do not handle well both f16 and quantization. ### Overidding specifically this layer for that reason. ### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316 ### https://github.com/huggingface/transformers/issues/20287 _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load( config, prefix=f"{prefix}.wo", weights=weights, bias=False ) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ( ACT2FN[config.dense_act_fn] if "gelu" not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) # XXX: Recasting is already done within the layer norm. 
# Casting back to float16 here modifies results # hidden_states = hidden_states.to(dtype=self.wo_cast[1]) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() if config.is_gated_act: self.DenseReluDense = T5DenseGatedActDense( config, prefix=f"{prefix}.DenseReluDense", weights=weights ) else: self.DenseReluDense = T5DenseActDense( config, prefix=f"{prefix}.DenseReluDense", weights=weights ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__( self, config: T5Config, prefix, weights, has_relative_attention_bias=False ): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim process_group = weights.process_group # Mesh TensorFlow initialization to avoid scaling before softmax assert self.n_heads % process_group.size() == 0 self.q = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q", weights=weights, bias=False ) self.k = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k", weights=weights, bias=False ) self.v = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v", weights=weights, bias=False ) self.o = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o", weights=weights, bias=False ) if self.n_heads % weights.process_group.size() != 0: raise ValueError( f"`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.n_heads = self.n_heads // process_group.size() self.inner_dim = self.inner_dim // process_group.size() if self.has_relative_attention_bias: self.relative_attention_bias = PartialTPEmbedding( prefix=f"{prefix}.relative_attention_bias", weights=weights ) @staticmethod def _relative_position_bucket( relative_position, bidirectional=True, num_buckets=32, max_distance=128 ): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min( relative_position, torch.zeros_like(relative_position) ) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1), ) relative_buckets += torch.where( is_small, relative_position, relative_position_if_large ) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[ :, None ] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[ None, : ] relative_position = ( memory_position - context_position ) # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias( relative_position_bucket ) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze( 0 ) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. 
Got {len(past_key_value)} past states" real_seq_length += ( past_key_value[0].shape[2] if query_length is None else query_length ) key_length = ( real_seq_length if key_value_states is None else key_value_states.shape[1] ) def shape(states): """projection""" return states.view( batch_size, -1, self.n_heads, self.key_value_proj_dim ).transpose(1, 2) def unshape(states): """reshape""" return ( states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) ) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape( self.q(hidden_states) ) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None, ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None, ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype, ) else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device ) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = ( position_bias + mask ) # (batch_size, n_heads, seq_length, key_length) position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape( torch.matmul(attn_weights, value_states) ) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = ( (key_states, value_states) if (self.is_decoder and use_cache) else None ) outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias=False): 
super().__init__() self.SelfAttention = T5Attention( config, prefix=f"{prefix}.SelfAttention", weights=weights, has_relative_attention_bias=has_relative_attention_bias, ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[ 1: ] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.EncDecAttention = T5Attention( config, prefix=f"{prefix}.EncDecAttention", weights=weights, has_relative_attention_bias=False, ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[ 1: ] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias: bool): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append( T5LayerSelfAttention( config, prefix=f"{prefix}.layer.0", weights=weights, has_relative_attention_bias=has_relative_attention_bias, ) ) if self.is_decoder: i = 2 self.layer.append( T5LayerCrossAttention( config, prefix=f"{prefix}.layer.1", weights=weights ) ) else: i = 1 self.layer.append( T5LayerFF(config, prefix=f"{prefix}.layer.{i}", weights=weights) ) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning( "`past_key_values` is passed to the encoder. Please make sure this is intended." ) expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[ 2: ] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = ( present_key_value_state + cross_attention_outputs[1] ) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = T5Config def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id." 
" See T5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full( input_ids.shape[:-1] + (1,), decoder_start_token_id ) shifted_input_ids = torch.cat( [shifted_input_ids, input_ids[..., :-1]], dim=-1 ) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert ( pad_token_id is not None ), "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, prefix, weights, embed_tokens): super().__init__(config) self.is_decoder = config.is_decoder self.embed_tokens = embed_tokens self.block = nn.ModuleList( [ T5Block( config, prefix=f"{prefix}.block.{layer_id}", weights=weights, has_relative_attention_bias=(layer_id == 0), ) for layer_id in range(config.num_layers) ] ) self.final_layer_norm = T5LayerNorm( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds" ) if inputs_embeds is None: assert ( self.embed_tokens is not None ), "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = ( past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length ) if use_cache is True: assert ( self.is_decoder ), f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones( batch_size, mask_seq_length, device=inputs_embeds.device ) if ( self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None ): encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long, ) # initialize past_key_values with `None` if past does not exist if past_key_values is 
None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device ) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask( cross_attn_head_mask, self.config.num_layers ) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate( zip(self.block, past_key_values) ): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[ 4 if output_attentions else 3 ] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + ( present_key_value_state, ) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, 
all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class T5ForConditionalGeneration(T5PreTrainedModel): def __init__(self, config: T5Config, weights): super().__init__(config) self.model_dim = config.d_model self.shared = TensorParallelEmbedding(prefix="shared", weights=weights) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack( config=encoder_config, prefix="encoder", weights=weights, embed_tokens=self.shared, ) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack( config=decoder_config, prefix="decoder", weights=weights, embed_tokens=self.shared, ) try: self.lm_head = TensorParallelHead.load( config, prefix="lm_head", weights=weights ) except RuntimeError: # Some models like t5-small were saved with shared weights unlike flan # Since they are declared as the same arch we have no choice but hope # that this is OK instead of using a proper flag. self.lm_head = TensorParallelHead.load( config, prefix="shared", weights=weights ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if ( labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None ): # get decoder inputs 
from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "decoder_attention_mask": decoder_attention_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning( "You might want to consider setting `use_cache=True` to speed up decoding" ) return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select( 0, beam_idx.to(layer_past_state.device) ), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + ( 
reordered_layer_past_states, ) return reordered_decoder_past
text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py", "repo_id": "text-generation-inference", "token_count": 22421 }
196
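The T5 file above implements its layer norm as a scale-only RMS norm (`layer_norm` / `T5LayerNorm`): no bias, no subtraction of the mean, and the variance accumulated in fp32 even for half-precision inputs, with Apex's `FusedRMSNorm` swapped in when available. The sketch below restates that idea as a small self-contained module so the behaviour can be checked in isolation; it is an illustration rather than the `text_generation_server` implementation, and the hidden size used in the demo is an arbitrary assumption.

import torch
from torch import nn


class ScaleOnlyRMSNorm(nn.Module):
    """Standalone restatement of the T5-style norm above: scale-only, no mean/bias,
    second moment accumulated in fp32."""

    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Accumulate the second moment in fp32 even for fp16/bf16 inputs.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        # Drop back to the weight dtype before the learned rescaling.
        if self.weight.dtype in (torch.float16, torch.bfloat16):
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


if __name__ == "__main__":
    x = torch.randn(2, 4, 8)
    y = ScaleOnlyRMSNorm(8)(x)
    # With weight == 1, every position ends up with (roughly) unit mean square.
    print(y.pow(2).mean(-1))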
import torch import torch.distributed from transformers import AutoConfig, AutoTokenizer from typing import Optional, List, Tuple from text_generation_server.models import CausalLM from text_generation_server.models.custom_modeling.phi_modeling import ( PhiConfig, PhiForCausalLM, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) class Phi(CausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, _rank, _world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device("cuda") dtype = torch.float16 if dtype is None else dtype else: if quantize: raise ValueError("quantization is not available on CPU") device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = PhiConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) tokenizer.bos_token_id = config.bos_token_id tokenizer.eos_token_id = config.eos_token_id tokenizer.pad_token = tokenizer.eos_token config.quantize = quantize torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) model = PhiForCausalLM(config, weights) torch.distributed.barrier(group=self.process_group) super(CausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, )
text-generation-inference/server/text_generation_server/models/phi.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/phi.py", "repo_id": "text-generation-inference", "token_count": 961 }
197
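For orientation, here is a hedged sketch of how the `Phi` wrapper above is meant to be constructed. The checkpoint id and the availability of a text-generation-inference runtime (downloaded safetensors weights, working process group) are assumptions on my part; the class itself loads the tokenizer and `PhiConfig`, shards the weights, and hands `PhiForCausalLM` to the generic `CausalLM` serving logic it inherits from.

# Illustrative only: "microsoft/phi-2" is an assumed checkpoint id, and the call expects
# the usual text-generation-inference runtime around it.
from text_generation_server.models.phi import Phi

model = Phi(
    model_id="microsoft/phi-2",
    revision=None,
    quantize=None,        # rejected on CPU by the constructor above
    dtype=None,           # defaults to float16 on CUDA, float32 on CPU
    trust_remote_code=False,
)
# Batching, prefill and decode are then handled by the inherited CausalLM machinery.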
# Adapted from turboderp exllama: https://github.com/turboderp/exllamav2 import torch import torch.nn as nn from loguru import logger try: from exllamav2_kernels import make_q_matrix, gemm_half_q_half except ImportError: logger.error("exllamav2_kernels not installed.") raise # Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension none_tensor = torch.empty((1, 1), device="meta") def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda): """Matrix multiplication, returns x @ q4""" output_shape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device) gemm_half_q_half(x, q_handle, output, force_cuda) return output.view(output_shape) # Group map needed for irregular group sizes def make_group_map(q_groups, num_qrows): gr = q_groups.tolist() group_map = [] num_groups = len(gr) // 2 for i in range(num_groups): bits = gr[i * 2] if i < num_groups - 1: qrows = gr[i * 2 + 3] - gr[i * 2 + 1] else: qrows = num_qrows - gr[i * 2 + 1] rows = qrows * 32 // bits for j in range(rows): group_map += [i] group_map += [rows - j] return torch.tensor(group_map, dtype=torch.short, device=q_groups.device) # Create Q matrix def ext_make_q_matrix(w: dict, temp_dq, key: str = None): """ Create Q matrix """ # EXL2 # won't work as the moment because the tensors are not the same. if "q_weight" in w: w["q_scale_max"] /= 256 w["q_perm"] = w["q_perm"].short() w["q_invperm"] = w["q_invperm"].short() if "q_group_map" not in w: w["q_group_map"] = make_group_map(w["q_groups"], w["q_weight"].shape[0]) return make_q_matrix( w["q_weight"], w["q_perm"], w["q_invperm"], w["q_scale"], w["q_scale_max"], w["q_groups"], w["q_group_map"], none_tensor, none_tensor, none_tensor, temp_dq, ) # GPTQ elif "qweight" in w: if w["scales"].dtype == torch.float: w["scales"] = w["scales"].half() # GPTQ with g_idx (act_order) if w.get("g_idx", None) is not None and not (w["g_idx"] == 0).all().item(): w["q_perm"] = torch.empty( (w["qweight"].shape[0] * 8,), dtype=torch.short, device=w["qweight"].device, ) w["q_invperm"] = torch.empty_like(w["q_perm"]) # make_q4 segfaults if g_idx is not on cpu in the act-order case. In the non act-order case, None needs to be passed for g_idx. return make_q_matrix( w["qweight"], w["q_perm"], w["q_invperm"], none_tensor, none_tensor, none_tensor, none_tensor, w["qzeros"], w["scales"], w["g_idx"].cpu(), temp_dq, ) # GPTQ without g_idx else: return make_q_matrix( w["qweight"], none_tensor, none_tensor, none_tensor, none_tensor, none_tensor, none_tensor, w["qzeros"], w["scales"], none_tensor, temp_dq, ) DEVICE = None FIXED_BYTES = 0 LAYERS = [] def set_device(device): global DEVICE DEVICE = device def create_exllama_buffers(max_total_tokens: int): global FIXED_BYTES, LAYERS, DEVICE temp_dq = ExLlamaV2DeviceTensors(DEVICE, FIXED_BYTES) for layer in LAYERS: layer.post_init(temp_dq) class QuantLinear(nn.Module): QUANT_TYPE = "exllamav2" """Linear layer implementation with per-group 4-bit quantization of the weights""" # def __init__(self, bits, group_size, infeatures, outfeatures, bias, trainable=False, **kwargs): def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize): super().__init__() if bits != 4: raise ValueError( f"Exllamav2 kernel supports only bits=4, requested bits={bits}. Something is wrong in the model initialization." 
) self.q_handle = None self.q_tensors = None self.bits = bits self.maxq = 2**self.bits - 1 self.infeatures = qweight.shape[0] // self.bits * 32 self.outfeatures = qweight.shape[1] self.padding = -self.outfeatures % 32 self.outfeatures = self.outfeatures + self.padding self.device = qweight.device self.qweight = qweight self.qzeros = qzeros self.scales = scales self.g_idx = g_idx self.bias = bias if bias is not None else None self.group_size = groupsize global FIXED_BYTES, LAYERS FIXED_BYTES = max(FIXED_BYTES, self.scratch_space_fixed()) LAYERS.append(self) def post_init(self, temp_dq): assert self.qweight.device.type == "cuda" assert self.qweight.device.index is not None self.q_tensors = { "qweight": self.qweight, "qzeros": self.qzeros, "scales": self.scales, "g_idx": self.g_idx, } temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size()) # We NEED to keep a pointer on Python side, otherwise the garbage collector will mess with us, # and `Memory access fault by GPU node-2` will EAT you. self.temp_dq = temp_dq self.q_handle = ext_make_q_matrix(self.q_tensors, temp_dq) def forward(self, x, force_cuda=False): output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda) if self.bias is not None: output.add_(self.bias) return output def temp_dq_size(self): return self.infeatures * self.outfeatures * 2 + 128 def temp_fwd_size(self, max_input_len, max_batch_size): return self.outfeatures * max_input_len * max_batch_size * 4 + 128 def scratch_space_fixed(self, max_input_len=4096, max_batch_size=16): return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size) class ExLlamaV2DeviceTensors: device_idx: int scratch_bytes: int scratch_idx: int scratch: torch.tensor = None def __init__(self, device, scratch_bytes): self.device = device self.scratch_bytes = scratch_bytes def prepare(self): self.scratch = torch.empty( (self.scratch_bytes // 2,), dtype=torch.half, device=self.device ) def get_scratch_slice(self, size_bytes): if self.scratch is None: self.prepare() size_bytes = ((size_bytes + 127) // 128) * 128 size_half = size_bytes // 2 scratch_slice = self.scratch.narrow(0, 0, size_half) return scratch_slice
text-generation-inference/server/text_generation_server/utils/gptq/exllamav2.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/gptq/exllamav2.py", "repo_id": "text-generation-inference", "token_count": 3518 }
198
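A detail of the exllamav2 wrapper above that is easy to gloss over is the scratch-space accounting: each `QuantLinear` reports a fixed scratch requirement, the module-level `FIXED_BYTES` keeps the maximum across layers, one shared `ExLlamaV2DeviceTensors` buffer of that size is created by `create_exllama_buffers`, and `get_scratch_slice` rounds every request up to a multiple of 128 bytes. The plain-Python sketch below mirrors that arithmetic so the sizes can be sanity-checked without the CUDA extension; the 4096x4096 layer shape is an assumed example and the byte factors are copied from the methods above.

def temp_dq_size(infeatures: int, outfeatures: int) -> int:
    # fp16 dequantization buffer for one weight matrix (2 bytes per element) plus padding.
    return infeatures * outfeatures * 2 + 128


def temp_fwd_size(outfeatures: int, max_input_len: int, max_batch_size: int) -> int:
    # Forward-pass workspace, 4 bytes per output element as in the method above.
    return outfeatures * max_input_len * max_batch_size * 4 + 128


def scratch_space_fixed(
    infeatures: int, outfeatures: int, max_input_len: int = 4096, max_batch_size: int = 16
) -> int:
    return temp_dq_size(infeatures, outfeatures) + temp_fwd_size(
        outfeatures, max_input_len, max_batch_size
    )


def rounded_slice(size_bytes: int) -> int:
    # get_scratch_slice rounds each request up to a 128-byte multiple.
    return ((size_bytes + 127) // 128) * 128


if __name__ == "__main__":
    in_f = out_f = 4096  # assumed layer shape, purely illustrative
    print(f"temp_dq_size       : {temp_dq_size(in_f, out_f) / 2**20:.1f} MiB")
    print(f"scratch_space_fixed: {scratch_space_fixed(in_f, out_f) / 2**30:.2f} GiB")
    print(f"1000-byte slice    : {rounded_slice(1000)} bytes")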
[target.aarch64-unknown-linux-musl] linker = "aarch64-linux-musl-gcc" rustflags = ["-C", "target-feature=-crt-static"]
tokenizers/bindings/node/.cargo/config.toml/0
{ "file_path": "tokenizers/bindings/node/.cargo/config.toml", "repo_id": "tokenizers", "token_count": 50 }
199
/* tslint:disable */ /* eslint-disable */ /* auto-generated by NAPI-RS */ export function bpeDecoder(suffix?: string | undefined | null): Decoder export function byteFallbackDecoder(): Decoder export function ctcDecoder( padToken?: string = '<pad>', wordDelimiterToken?: string | undefined | null, cleanup?: boolean | undefined | null, ): Decoder export function fuseDecoder(): Decoder export function metaspaceDecoder(replacement?: string = '▁', addPrefixSpace?: bool = true): Decoder export function replaceDecoder(pattern: string, content: string): Decoder export function sequenceDecoder(decoders: Array<Decoder>): Decoder export function stripDecoder(content: string, left: number, right: number): Decoder export function wordPieceDecoder(prefix?: string = '##', cleanup?: bool = true): Decoder export const enum TruncationDirection { Left = 'Left', Right = 'Right', } export const enum TruncationStrategy { LongestFirst = 'LongestFirst', OnlyFirst = 'OnlyFirst', OnlySecond = 'OnlySecond', } export interface BpeOptions { cacheCapacity?: number dropout?: number unkToken?: string continuingSubwordPrefix?: string endOfWordSuffix?: string fuseUnk?: boolean byteFallback?: boolean } export interface WordPieceOptions { unkToken?: string continuingSubwordPrefix?: string maxInputCharsPerWord?: number } export interface WordLevelOptions { unkToken?: string } export interface UnigramOptions { unkId?: number byteFallback?: boolean } export function prependNormalizer(prepend: string): Normalizer export function stripAccentsNormalizer(): Normalizer export interface BertNormalizerOptions { cleanText?: boolean handleChineseChars?: boolean stripAccents?: boolean lowercase?: boolean } /** * bert_normalizer(options?: { * cleanText?: bool = true, * handleChineseChars?: bool = true, * stripAccents?: bool = true, * lowercase?: bool = true * }) */ export function bertNormalizer(options?: BertNormalizerOptions | undefined | null): Normalizer export function nfdNormalizer(): Normalizer export function nfkdNormalizer(): Normalizer export function nfcNormalizer(): Normalizer export function nfkcNormalizer(): Normalizer export function stripNormalizer(left?: boolean | undefined | null, right?: boolean | undefined | null): Normalizer export function sequenceNormalizer(normalizers: Array<Normalizer>): Normalizer export function lowercase(): Normalizer export function replace(pattern: string, content: string): Normalizer export function nmt(): Normalizer export function precompiled(bytes: Array<number>): Normalizer export const enum JsSplitDelimiterBehavior { Removed = 'Removed', Isolated = 'Isolated', MergedWithPrevious = 'MergedWithPrevious', MergedWithNext = 'MergedWithNext', Contiguous = 'Contiguous', } /** byte_level(addPrefixSpace: bool = true, useRegex: bool = true) */ export function byteLevelPreTokenizer( addPrefixSpace?: boolean | undefined | null, useRegex?: boolean | undefined | null, ): PreTokenizer export function byteLevelAlphabet(): Array<string> export function whitespacePreTokenizer(): PreTokenizer export function whitespaceSplitPreTokenizer(): PreTokenizer export function bertPreTokenizer(): PreTokenizer export function metaspacePreTokenizer(replacement?: string = '▁', addPrefixSpace?: bool = true): PreTokenizer export function splitPreTokenizer(pattern: string, behavior: string, invert?: boolean | undefined | null): PreTokenizer export function punctuationPreTokenizer(behavior?: string | undefined | null): PreTokenizer export function sequencePreTokenizer(preTokenizers: Array<PreTokenizer>): PreTokenizer 
export function charDelimiterSplit(delimiter: string): PreTokenizer export function digitsPreTokenizer(individualDigits?: boolean | undefined | null): PreTokenizer export function bertProcessing(sep: [string, number], cls: [string, number]): Processor export function robertaProcessing( sep: [string, number], cls: [string, number], trimOffsets?: boolean | undefined | null, addPrefixSpace?: boolean | undefined | null, ): Processor export function byteLevelProcessing(trimOffsets?: boolean | undefined | null): Processor export function templateProcessing( single: string, pair?: string | undefined | null, specialTokens?: Array<[string, number]> | undefined | null, ): Processor export function sequenceProcessing(processors: Array<Processor>): Processor export const enum PaddingDirection { Left = 0, Right = 1, } export interface PaddingOptions { maxLength?: number direction?: string | PaddingDirection padToMultipleOf?: number padId?: number padTypeId?: number padToken?: string } export interface EncodeOptions { isPretokenized?: boolean addSpecialTokens?: boolean } export interface TruncationOptions { maxLength?: number strategy?: TruncationStrategy direction?: string | TruncationDirection stride?: number } export interface AddedTokenOptions { singleWord?: boolean leftStrip?: boolean rightStrip?: boolean normalized?: boolean } export interface JsFromPretrainedParameters { revision?: string authToken?: string } export function slice(s: string, beginIndex?: number | undefined | null, endIndex?: number | undefined | null): string export function mergeEncodings(encodings: Array<Encoding>, growingOffsets?: boolean | undefined | null): Encoding /** Decoder */ export class Decoder { decode(tokens: Array<string>): string } export type JsEncoding = Encoding export class Encoding { constructor() getLength(): number getNSequences(): number getIds(): Array<number> getTypeIds(): Array<number> getAttentionMask(): Array<number> getSpecialTokensMask(): Array<number> getTokens(): Array<string> getOffsets(): Array<Array<number>> getWordIds(): Array<number | undefined | null> charToToken(pos: number, seqId?: number | undefined | null): number | null charToWord(pos: number, seqId?: number | undefined | null): number | null pad(length: number, options?: PaddingOptions | undefined | null): void truncate( length: number, stride?: number | undefined | null, direction?: string | TruncationDirection | undefined | null, ): void wordToTokens(word: number, seqId?: number | undefined | null): [number, number] | null | undefined wordToChars(word: number, seqId?: number | undefined | null): [number, number] | null | undefined tokenToChars(token: number): [number, [number, number]] | null | undefined tokenToWord(token: number): number | null getOverflowing(): Array<Encoding> getSequenceIds(): Array<number | undefined | null> tokenToSequence(token: number): number | null } export class Model {} export type Bpe = BPE export class BPE { static empty(): Model static init(vocab: Vocab, merges: Merges, options?: BpeOptions | undefined | null): Model static fromFile(vocab: string, merges: string, options?: BpeOptions | undefined | null): Promise<Model> } export class WordPiece { static init(vocab: Vocab, options?: WordPieceOptions | undefined | null): Model static empty(): WordPiece static fromFile(vocab: string, options?: WordPieceOptions | undefined | null): Promise<Model> } export class WordLevel { static init(vocab: Vocab, options?: WordLevelOptions | undefined | null): Model static empty(): WordLevel static fromFile(vocab: string, 
options?: WordLevelOptions | undefined | null): Promise<Model> } export class Unigram { static init(vocab: Array<[string, number]>, options?: UnigramOptions | undefined | null): Model static empty(): Model } /** Normalizer */ export class Normalizer { normalizeString(sequence: string): string } /** PreTokenizers */ export class PreTokenizer { preTokenizeString(sequence: string): [string, [number, number]][] } export class Processor {} export class AddedToken { constructor(token: string, isSpecial: boolean, options?: AddedTokenOptions | undefined | null) getContent(): string } export class Tokenizer { constructor(model: Model) setPreTokenizer(preTokenizer: PreTokenizer): void setDecoder(decoder: Decoder): void setModel(model: Model): void setPostProcessor(postProcessor: Processor): void setNormalizer(normalizer: Normalizer): void save(path: string, pretty?: boolean | undefined | null): void addAddedTokens(tokens: Array<AddedToken>): number addTokens(tokens: Array<string>): number encode( sentence: InputSequence, pair?: InputSequence | null, encodeOptions?: EncodeOptions | undefined | null, ): Promise<JsEncoding> encodeBatch(sentences: EncodeInput[], encodeOptions?: EncodeOptions | undefined | null): Promise<JsEncoding[]> decode(ids: Array<number>, skipSpecialTokens: boolean): Promise<string> decodeBatch(ids: Array<Array<number>>, skipSpecialTokens: boolean): Promise<string[]> static fromString(s: string): Tokenizer static fromFile(file: string): Tokenizer addSpecialTokens(tokens: Array<string>): void setTruncation(maxLength: number, options?: TruncationOptions | undefined | null): void disableTruncation(): void setPadding(options?: PaddingOptions | undefined | null): void disablePadding(): void getDecoder(): Decoder | null getNormalizer(): Normalizer | null getPreTokenizer(): PreTokenizer | null getPostProcessor(): Processor | null getVocab(withAddedTokens?: boolean | undefined | null): Record<string, number> getVocabSize(withAddedTokens?: boolean | undefined | null): number idToToken(id: number): string | null tokenToId(token: string): number | null train(files: Array<string>): void runningTasks(): number postProcess( encoding: Encoding, pair?: Encoding | undefined | null, addSpecialTokens?: boolean | undefined | null, ): Encoding } export class Trainer {}
tokenizers/bindings/node/index.d.ts/0
{ "file_path": "tokenizers/bindings/node/index.d.ts", "repo_id": "tokenizers", "token_count": 2717 }
200
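The typings above are the whole surface of the Node bindings, and most of them revolve around the `Encoding` object (`getIds`, `getOffsets`, `charToToken`, ...). To make those accessors concrete, here is a small sketch using the Python bindings of the same Rust core, which I am assuming expose the same underlying data; loading `bert-base-uncased` from the Hub is likewise an assumption (it needs network access), and any `tokenizer.json` would do.

from tokenizers import Tokenizer

tok = Tokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint, needs network
enc = tok.encode("Hello, world!")

print(enc.tokens)            # pieces, e.g. ['[CLS]', 'hello', ',', 'world', '!', '[SEP]']
print(enc.ids)               # integer ids, the Node getIds() equivalent
print(enc.offsets)           # (start, end) character spans, as in getOffsets()
print(enc.word_ids)          # word index per token, None for special tokens
print(enc.char_to_token(1))  # token covering character 1, as in charToToken()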
# `tokenizers-android-arm64`

This is the **aarch64-linux-android** binary for `tokenizers`.
tokenizers/bindings/node/npm/android-arm64/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/android-arm64/README.md", "repo_id": "tokenizers", "token_count": 31 }
201
# `tokenizers-linux-x64-musl`

This is the **x86_64-unknown-linux-musl** binary for `tokenizers`.
tokenizers/bindings/node/npm/linux-x64-musl/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/linux-x64-musl/README.md", "repo_id": "tokenizers", "token_count": 38 }
202
use crate::arc_rwlock_serde; use napi::bindgen_prelude::*; use napi_derive::napi; use serde::{Deserialize, Serialize}; use std::sync::{Arc, RwLock}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::PreTokenizedString; use tk::SplitDelimiterBehavior; use tokenizers as tk; #[napi(string_enum)] pub enum JsSplitDelimiterBehavior { Removed, Isolated, MergedWithPrevious, MergedWithNext, Contiguous, } impl TryFrom<String> for JsSplitDelimiterBehavior { type Error = Error; fn try_from(value: String) -> Result<Self> { match &value[..] { "removed" => Ok(JsSplitDelimiterBehavior::Removed), "isolated" => Ok(JsSplitDelimiterBehavior::Isolated), "mergedWithPrevious" => Ok(JsSplitDelimiterBehavior::MergedWithPrevious), "mergedWithNext" => Ok(JsSplitDelimiterBehavior::MergedWithNext), "contiguous" => Ok(JsSplitDelimiterBehavior::Contiguous), _ => Err(Error::from_reason( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, mergedWithPrevious, mergedWithNext, contiguous`" .to_string(), )), } } } impl From<JsSplitDelimiterBehavior> for SplitDelimiterBehavior { fn from(value: JsSplitDelimiterBehavior) -> Self { match value { JsSplitDelimiterBehavior::Removed => SplitDelimiterBehavior::Removed, JsSplitDelimiterBehavior::Isolated => SplitDelimiterBehavior::Isolated, JsSplitDelimiterBehavior::MergedWithPrevious => SplitDelimiterBehavior::MergedWithPrevious, JsSplitDelimiterBehavior::MergedWithNext => SplitDelimiterBehavior::MergedWithNext, JsSplitDelimiterBehavior::Contiguous => SplitDelimiterBehavior::Contiguous, } } } /// PreTokenizers #[derive(Clone, Debug, Serialize, Deserialize)] #[napi] pub struct PreTokenizer { #[serde(flatten, with = "arc_rwlock_serde")] pretok: Option<Arc<RwLock<PreTokenizerWrapper>>>, } impl tk::PreTokenizer for PreTokenizer { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> tk::Result<()> { self .pretok .as_ref() .ok_or("Uninitialized PreTokenizer")? 
.read() .unwrap() .pre_tokenize(pretokenized)?; Ok(()) } } #[napi] impl PreTokenizer { #[napi(ts_return_type = "[string, [number, number]][]")] pub fn pre_tokenize_string(&self, sequence: String, env: Env) -> Result<Vec<Array>> { use tk::PreTokenizer; let mut pretokenized = PreTokenizedString::from(sequence); self .pre_tokenize(&mut pretokenized) .map_err(|e| Error::from_reason(format!("{}", e)))?; pretokenized .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char) .into_iter() .map(|(s, (start, end), _)| -> Result<Array> { let mut arr = env.create_array(2)?; let mut offset = env.create_array(2)?; offset.set(0, env.create_uint32(start as u32)?)?; offset.set(1, env.create_uint32(end as u32)?)?; arr.set(0, env.create_string(s)?)?; arr.set(1, offset)?; Ok(arr) }) .collect::<Result<Vec<_>>>() } } /// byte_level(addPrefixSpace: bool = true, useRegex: bool = true) #[napi] pub fn byte_level_pre_tokenizer( add_prefix_space: Option<bool>, use_regex: Option<bool>, ) -> PreTokenizer { let mut byte_level = tk::pre_tokenizers::byte_level::ByteLevel::default(); if let Some(add_prefix_space) = add_prefix_space { byte_level = byte_level.add_prefix_space(add_prefix_space); } if let Some(use_regex) = use_regex { byte_level = byte_level.use_regex(use_regex); } PreTokenizer { pretok: Some(Arc::new(RwLock::new(byte_level.into()))), } } #[napi] pub fn byte_level_alphabet() -> Vec<String> { tk::pre_tokenizers::byte_level::ByteLevel::alphabet() .into_iter() .map(|c| c.to_string()) .collect::<Vec<_>>() } #[napi] pub fn whitespace_pre_tokenizer() -> PreTokenizer { PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::whitespace::Whitespace.into(), ))), } } #[napi] pub fn whitespace_split_pre_tokenizer() -> PreTokenizer { PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::whitespace::WhitespaceSplit.into(), ))), } } #[napi] pub fn bert_pre_tokenizer() -> PreTokenizer { PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::bert::BertPreTokenizer.into(), ))), } } #[napi] pub fn metaspace_pre_tokenizer( #[napi(ts_arg_type = "string = '▁'")] replacement: Option<String>, #[napi(ts_arg_type = "bool = true")] add_prefix_space: Option<bool>, ) -> Result<PreTokenizer> { let add_prefix_space = add_prefix_space.unwrap_or(true); let replacement = replacement.unwrap_or("▁".to_string()); if replacement.chars().count() != 1 { return Err(Error::from_reason( "replacement is supposed to be a single char", )); } let replacement = replacement.chars().next().unwrap(); Ok(PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::metaspace::Metaspace::new(replacement, add_prefix_space).into(), ))), }) } #[napi] pub fn split_pre_tokenizer( pattern: String, behavior: String, invert: Option<bool>, ) -> Result<PreTokenizer> { let behavior: JsSplitDelimiterBehavior = behavior.try_into()?; let invert = invert.unwrap_or(false); Ok(PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::split::Split::new(pattern, behavior.into(), invert) .map_err(|e| Error::from_reason(e.to_string()))? 
.into(), ))), }) } #[napi] pub fn punctuation_pre_tokenizer(behavior: Option<String>) -> Result<PreTokenizer> { let behavior = match behavior { Some(behavior) => behavior.try_into()?, None => JsSplitDelimiterBehavior::Isolated, }; Ok(PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::punctuation::Punctuation::new(behavior.into()).into(), ))), }) } #[napi] pub fn sequence_pre_tokenizer(pre_tokenizers: Vec<&PreTokenizer>) -> PreTokenizer { let mut sequence: Vec<PreTokenizerWrapper> = Vec::with_capacity(pre_tokenizers.len()); pre_tokenizers.into_iter().for_each(|pre_tokenizer| { if let Some(pre_tokenizer) = &pre_tokenizer.pretok { sequence.push((**pre_tokenizer).read().unwrap().clone()) } }); PreTokenizer { pretok: Some(Arc::new(RwLock::new(PreTokenizerWrapper::Sequence( tk::pre_tokenizers::sequence::Sequence::new(sequence), )))), } } #[napi] pub fn char_delimiter_split(delimiter: String) -> Result<PreTokenizer> { if delimiter.chars().count() != 1 { return Err(Error::from_reason( "delimiter is supposed to be a single char", )); } let delimiter = delimiter.chars().next().unwrap(); Ok(PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::delimiter::CharDelimiterSplit::new(delimiter).into(), ))), }) } #[napi] pub fn digits_pre_tokenizer(individual_digits: Option<bool>) -> PreTokenizer { let individual_digits = individual_digits.unwrap_or(false); PreTokenizer { pretok: Some(Arc::new(RwLock::new( tk::pre_tokenizers::digits::Digits::new(individual_digits).into(), ))), } }
tokenizers/bindings/node/src/pre_tokenizers.rs/0
{ "file_path": "tokenizers/bindings/node/src/pre_tokenizers.rs", "repo_id": "tokenizers", "token_count": 2935 }
203
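The Rust file above backs the Node pre-tokenizer API, whose `preTokenizeString` returns `(piece, [start, end])` pairs. The Python bindings of the same pre-tokenizers make it easy to see what those pairs look like; the sketch below uses them as a stand-in for the Node API (an assumption on my part), and the printed outputs are indicative rather than guaranteed byte-for-byte.

from tokenizers import pre_tokenizers

byte_level = pre_tokenizers.ByteLevel(add_prefix_space=True)
print(byte_level.pre_tokenize_str("Hello world"))
# e.g. [('ĠHello', (0, 5)), ('Ġworld', (5, 11))]

seq = pre_tokenizers.Sequence(
    [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)]
)
print(seq.pre_tokenize_str("abc 123"))
# e.g. [('abc', (0, 3)), ('1', (4, 5)), ('2', (5, 6)), ('3', (6, 7))]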
.PHONY: style check-style test DATA_DIR = data dir_guard=@mkdir -p $(@D) check_dirs := examples py_src/tokenizers tests # Format source code automatically style: python stub.py black --line-length 119 --target-version py35 $(check_dirs) # Check the source code is formatted correctly check-style: python stub.py --check black --check --line-length 119 --target-version py35 examples py_src/tokenizers tests TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json # Launch the test suite test: $(TESTS_RESOURCES) pip install pytest requests setuptools_rust numpy pyarrow datasets python -m pytest -s -v tests cargo test --no-default-features $(DATA_DIR)/big.txt : $(dir_guard) wget https://norvig.com/big.txt -O $@ $(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt head -100 $(DATA_DIR)/big.txt > $@ $(DATA_DIR)/roberta.json : $(dir_guard) wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
tokenizers/bindings/python/Makefile/0
{ "file_path": "tokenizers/bindings/python/Makefile", "repo_id": "tokenizers", "token_count": 347 }
204
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers from tokenizers.models import BPE from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer class ByteLevelBPETokenizer(BaseTokenizer): """ByteLevelBPETokenizer Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, add_prefix_space: bool = False, lowercase: bool = False, dropout: Optional[float] = None, unicode_normalizer: Optional[str] = None, continuing_subword_prefix: Optional[str] = None, end_of_word_suffix: Optional[str] = None, trim_offsets: bool = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer( BPE( vocab, merges, dropout=dropout, continuing_subword_prefix=continuing_subword_prefix or "", end_of_word_suffix=end_of_word_suffix or "", ) ) else: tokenizer = Tokenizer(BPE()) # Check for Unicode normalization first (before everything else) normalizers = [] if unicode_normalizer: normalizers += [unicode_normalizer_from_str(unicode_normalizer)] if lowercase: normalizers += [Lowercase()] # Create the normalizer structure if len(normalizers) > 0: if len(normalizers) > 1: tokenizer.normalizer = Sequence(normalizers) else: tokenizer.normalizer = normalizers[0] tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets) parameters = { "model": "ByteLevelBPE", "add_prefix_space": add_prefix_space, "lowercase": lowercase, "dropout": dropout, "unicode_normalizer": unicode_normalizer, "continuing_subword_prefix": continuing_subword_prefix, "end_of_word_suffix": end_of_word_suffix, "trim_offsets": trim_offsets, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return ByteLevelBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, show_progress: bool = True, special_tokens: List[Union[str, AddedToken]] = [], ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, show_progress: bool = True, special_tokens: List[Union[str, AddedToken]] = [], length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
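For reference, a minimal usage sketch of the wrapper above. The corpus path, vocabulary size, and special tokens are illustrative assumptions, not part of the original file:

```python
from tokenizers import ByteLevelBPETokenizer

# Train a byte-level BPE tokenizer on a plain-text corpus (path is made up)
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train(
    files=["data/small.txt"],
    vocab_size=5000,
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>"],
)

encoding = tokenizer.encode("Hello, how are you?")
print(encoding.tokens)
print(encoding.ids)

# Persist the trained tokenizer as a single JSON file
tokenizer.save("byte-level-bpe.json")
```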
tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py", "repo_id": "tokenizers", "token_count": 1978 }
205
# Generated content DO NOT EDIT
class Trainer:
    """
    Base class for all trainers

    This class is not supposed to be instantiated directly. Instead, any implementation of a
    Trainer will return an instance of this class when instantiated.
    """

class BpeTrainer(Trainer):
    """
    Trainer capable of training a BPE model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum number of different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.

        max_token_length (:obj:`int`, `optional`):
            Prevents creating tokens longer than the specified size.
            This can help reduce the pollution of your vocabulary with
            highly repetitive tokens like `======` from Wikipedia.
    """

class UnigramTrainer(Trainer):
    """
    Trainer capable of training a Unigram model

    Args:
        vocab_size (:obj:`int`):
            The size of the final vocabulary, including all tokens and alphabet.

        show_progress (:obj:`bool`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.

        initial_alphabet (:obj:`List[str]`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        shrinking_factor (:obj:`float`):
            The shrinking factor used at each step of the training to prune the vocabulary.

        unk_token (:obj:`str`):
            The token used for out-of-vocabulary tokens.

        max_piece_length (:obj:`int`):
            The maximum length of a given token.

        n_sub_iterations (:obj:`int`):
            The number of iterations of the EM algorithm to perform before pruning the vocabulary.
    """
    def __init__(
        self,
        vocab_size=8000,
        show_progress=True,
        special_tokens=[],
        shrinking_factor=0.75,
        unk_token=None,
        max_piece_length=16,
        n_sub_iterations=2,
    ):
        pass

class WordLevelTrainer(Trainer):
    """
    Trainer capable of training a WordLevel model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.
    """

class WordPieceTrainer(Trainer):
    """
    Trainer capable of training a WordPiece model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum number of different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.
    """

    def __init__(
        self,
        vocab_size=30000,
        min_frequency=0,
        show_progress=True,
        special_tokens=[],
        limit_alphabet=None,
        initial_alphabet=[],
        continuing_subword_prefix="##",
        end_of_word_suffix=None,
    ):
        pass
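To make the trainer options described above concrete, here is a small sketch of training a BPE model from an in-memory corpus; the corpus and hyperparameters are made up for illustration:

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

trainer = BpeTrainer(
    vocab_size=1000,
    min_frequency=2,
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)

# Any iterator of strings works; a real corpus would be much larger
corpus = ["a tiny in-memory corpus", "just a few sentences", "enough to illustrate the API"]
tokenizer.train_from_iterator(corpus, trainer=trainer)

print(tokenizer.get_vocab_size())
print(tokenizer.encode("a few sentences").tokens)
```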
tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi", "repo_id": "tokenizers", "token_count": 2178 }
206
use std::collections::{hash_map::DefaultHasher, HashMap};
use std::hash::{Hash, Hasher};

use numpy::{npyffi, PyArray1};
use pyo3::class::basic::CompareOp;
use pyo3::exceptions;
use pyo3::intern;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::models::bpe::BPE;
use tk::tokenizer::{
    Model, PaddingDirection, PaddingParams, PaddingStrategy, PostProcessor, TokenizerImpl,
    TruncationDirection, TruncationParams, TruncationStrategy,
};
use tk::utils::iter::ResultShunt;
use tokenizers as tk;

use super::decoders::PyDecoder;
use super::encoding::PyEncoding;
use super::error::{PyError, ToPyResult};
use super::models::PyModel;
use super::normalizers::PyNormalizer;
use super::pre_tokenizers::PyPreTokenizer;
use super::trainers::PyTrainer;
use crate::processors::PyPostProcessor;
use crate::utils::{MaybeSizedIterator, PyBufferedIterator};
use std::collections::BTreeMap;

/// Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
/// It can have special options that define the way it should behave.
///
/// Args:
///     content (:obj:`str`): The content of the token
///
///     single_word (:obj:`bool`, defaults to :obj:`False`):
///         Defines whether this token should only match single words. If :obj:`True`, this
///         token will never match inside of a word. For example the token ``ing`` would match
///         on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
///         The notion of "`inside of a word`" is defined by the word boundaries pattern in
///         regular expressions (i.e. the token should start and end with word boundaries).
///
///     lstrip (:obj:`bool`, defaults to :obj:`False`):
///         Defines whether this token should strip all potential whitespaces on its left side.
///         If :obj:`True`, this token will greedily match any whitespace on its left. For
///         example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
///         ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
///
///     rstrip (:obj:`bool`, defaults to :obj:`False`):
///         Defines whether this token should strip all potential whitespaces on its right
///         side. If :obj:`True`, this token will greedily match any whitespace on its right.
///         It works just like :obj:`lstrip` but on the right.
///
///     normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
///         Defines whether this token should match against the normalized version of the input
///         text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
///         lowercasing the text, the token could be extracted from the input ``"I saw a lion
///         Yesterday"``.
///
///     special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`True` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
///         Defines whether this token should be skipped when decoding.
/// #[pyclass(dict, module = "tokenizers", name = "AddedToken")] pub struct PyAddedToken { pub content: String, pub special: bool, pub single_word: Option<bool>, pub lstrip: Option<bool>, pub rstrip: Option<bool>, pub normalized: Option<bool>, } impl PyAddedToken { pub fn from<S: Into<String>>(content: S, special: Option<bool>) -> Self { Self { content: content.into(), special: special.unwrap_or(false), single_word: None, lstrip: None, rstrip: None, normalized: None, } } pub fn get_token(&self) -> tk::tokenizer::AddedToken { let mut token = tk::AddedToken::from(&self.content, self.special); if let Some(sw) = self.single_word { token = token.single_word(sw); } if let Some(ls) = self.lstrip { token = token.lstrip(ls); } if let Some(rs) = self.rstrip { token = token.rstrip(rs); } if let Some(n) = self.normalized { token = token.normalized(n); } token } pub fn as_pydict<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { let dict = PyDict::new(py); let token = self.get_token(); dict.set_item("content", token.content)?; dict.set_item("single_word", token.single_word)?; dict.set_item("lstrip", token.lstrip)?; dict.set_item("rstrip", token.rstrip)?; dict.set_item("normalized", token.normalized)?; dict.set_item("special", token.special)?; Ok(dict) } } impl From<tk::AddedToken> for PyAddedToken { fn from(token: tk::AddedToken) -> Self { Self { content: token.content, single_word: Some(token.single_word), lstrip: Some(token.lstrip), rstrip: Some(token.rstrip), normalized: Some(token.normalized), special: token.special, } } } #[pymethods] impl PyAddedToken { #[new] #[pyo3(signature = (content=None, **kwargs), text_signature = "(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False)")] fn __new__(content: Option<&str>, kwargs: Option<&PyDict>) -> PyResult<Self> { let mut token = PyAddedToken::from(content.unwrap_or(""), None); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "single_word" => token.single_word = Some(value.extract()?), "lstrip" => token.lstrip = Some(value.extract()?), "rstrip" => token.rstrip = Some(value.extract()?), "normalized" => token.normalized = Some(value.extract()?), "special" => token.special = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } Ok(token) } fn __getstate__<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { self.as_pydict(py) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyDict>(py) { Ok(state) => { for (key, value) in state { let key: &str = key.extract()?; match key { "content" => self.content = value.extract()?, "single_word" => self.single_word = Some(value.extract()?), "lstrip" => self.lstrip = Some(value.extract()?), "rstrip" => self.rstrip = Some(value.extract()?), "normalized" => self.normalized = Some(value.extract()?), "special" => self.special = value.extract()?, _ => {} } } Ok(()) } Err(e) => Err(e), } } /// Get the content of this :obj:`AddedToken` #[getter] fn get_content(&self) -> &str { &self.content } /// Set the content of this :obj:`AddedToken` #[setter] fn set_content(&mut self, content: String) { self.content = content; } /// Get the value of the :obj:`rstrip` option #[getter] fn get_rstrip(&self) -> bool { self.get_token().rstrip } /// Get the value of the :obj:`lstrip` option #[getter] fn get_lstrip(&self) -> bool { self.get_token().lstrip } /// Get the value of the :obj:`single_word` option #[getter] fn get_single_word(&self) -> bool { 
self.get_token().single_word } /// Get the value of the :obj:`normalized` option #[getter] fn get_normalized(&self) -> bool { self.get_token().normalized } /// Get the value of the :obj:`special` option #[getter] fn get_special(&self) -> bool { self.get_token().special } /// Set the value of the :obj:`special` option #[setter] fn set_special(&mut self, special: bool) { self.special = special; } fn __str__(&self) -> PyResult<&str> { Ok(&self.content) } fn __repr__(&self) -> PyResult<String> { let bool_to_python = |p| match p { true => "True", false => "False", }; let token = self.get_token(); Ok(format!( "AddedToken(\"{}\", rstrip={}, lstrip={}, single_word={}, normalized={}, special={})", self.content, bool_to_python(token.rstrip), bool_to_python(token.lstrip), bool_to_python(token.single_word), bool_to_python(token.normalized), bool_to_python(token.special) )) } fn __richcmp__(&self, other: Py<PyAddedToken>, op: CompareOp) -> bool { use CompareOp::*; Python::with_gil(|py| match op { Lt | Le | Gt | Ge => false, Eq => self.get_token() == other.borrow(py).get_token(), Ne => self.get_token() != other.borrow(py).get_token(), }) } fn __hash__(&self) -> u64 { let mut hasher = DefaultHasher::new(); self.get_token().hash(&mut hasher); hasher.finish() } } struct TextInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for TextInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { let err = exceptions::PyTypeError::new_err("TextInputSequence must be str"); if let Ok(s) = ob.downcast::<PyString>() { Ok(Self(s.to_string_lossy().into())) } else { Err(err) } } } impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: TextInputSequence<'s>) -> Self { s.0 } } struct PyArrayUnicode(Vec<String>); impl FromPyObject<'_> for PyArrayUnicode { fn extract(ob: &PyAny) -> PyResult<Self> { // SAFETY Making sure the pointer is a valid numpy array requires calling numpy C code if unsafe { npyffi::PyArray_Check(ob.py(), ob.as_ptr()) } == 0 { return Err(exceptions::PyTypeError::new_err("Expected an np.array")); } let arr = ob.as_ptr() as *mut npyffi::PyArrayObject; // SAFETY Getting all the metadata about the numpy array to check its sanity let (type_num, elsize, alignment, data, nd, flags) = unsafe { let desc = (*arr).descr; ( (*desc).type_num, (*desc).elsize as usize, (*desc).alignment as usize, (*arr).data, (*arr).nd, (*arr).flags, ) }; if nd != 1 { return Err(exceptions::PyTypeError::new_err( "Expected a 1 dimensional np.array", )); } if flags & (npyffi::NPY_ARRAY_C_CONTIGUOUS | npyffi::NPY_ARRAY_F_CONTIGUOUS) == 0 { return Err(exceptions::PyTypeError::new_err( "Expected a contiguous np.array", )); } if type_num != npyffi::types::NPY_TYPES::NPY_UNICODE as i32 { return Err(exceptions::PyTypeError::new_err( "Expected a np.array[dtype='U']", )); } // SAFETY Looking at the raw numpy data to create new owned Rust strings via copies (so it's safe afterwards). 
unsafe { let n_elem = *(*arr).dimensions as usize; let all_bytes = std::slice::from_raw_parts(data as *const u8, elsize * n_elem); let seq = (0..n_elem) .map(|i| { let bytes = &all_bytes[i * elsize..(i + 1) * elsize]; let unicode = pyo3::ffi::PyUnicode_FromKindAndData( pyo3::ffi::PyUnicode_4BYTE_KIND as _, bytes.as_ptr() as *const _, elsize as isize / alignment as isize, ); let py = ob.py(); let obj = PyObject::from_owned_ptr(py, unicode); let s = obj.downcast::<PyString>(py)?; Ok(s.to_string_lossy().trim_matches(char::from(0)).to_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } } impl From<PyArrayUnicode> for tk::InputSequence<'_> { fn from(s: PyArrayUnicode) -> Self { s.0.into() } } struct PyArrayStr(Vec<String>); impl FromPyObject<'_> for PyArrayStr { fn extract(ob: &PyAny) -> PyResult<Self> { let array = ob.downcast::<PyArray1<PyObject>>()?; let seq = array .readonly() .as_array() .iter() .map(|obj| { let s = obj.downcast::<PyString>(ob.py())?; Ok(s.to_string_lossy().into_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } impl From<PyArrayStr> for tk::InputSequence<'_> { fn from(s: PyArrayStr) -> Self { s.0.into() } } struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for PreTokenizedInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(seq) = ob.extract::<PyArrayUnicode>() { return Ok(Self(seq.into())); } if let Ok(seq) = ob.extract::<PyArrayStr>() { return Ok(Self(seq.into())); } if let Ok(s) = ob.downcast::<PyList>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } if let Ok(s) = ob.downcast::<PyTuple>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedInputSequence must be Union[List[str], Tuple[str]]", )) } } impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: PreTokenizedInputSequence<'s>) -> Self { s.0 } } struct TextEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for TextEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<TextInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(TextInputSequence, TextInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<TextInputSequence>()?; let second = arr[1].extract::<TextInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]", )) } } impl<'s> From<TextEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: TextEncodeInput<'s>) -> Self { i.0 } } struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for PreTokenizedEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<PreTokenizedInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(PreTokenizedInputSequence, PreTokenizedInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<PreTokenizedInputSequence>()?; let second = arr[1].extract::<PreTokenizedInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedEncodeInput must be Union[PreTokenizedInputSequence, \ Tuple[PreTokenizedInputSequence, 
PreTokenizedInputSequence]]", )) } } impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: PreTokenizedEncodeInput<'s>) -> Self { i.0 } } type Tokenizer = TokenizerImpl<PyModel, PyNormalizer, PyPreTokenizer, PyPostProcessor, PyDecoder>; /// A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input /// and outputs an :class:`~tokenizers.Encoding`. /// /// Args: /// model (:class:`~tokenizers.models.Model`): /// The core algorithm that this :obj:`Tokenizer` should be using. /// #[pyclass(dict, module = "tokenizers", name = "Tokenizer")] #[derive(Clone)] pub struct PyTokenizer { tokenizer: Tokenizer, } impl PyTokenizer { fn new(tokenizer: Tokenizer) -> Self { PyTokenizer { tokenizer } } fn from_model(model: PyModel) -> Self { PyTokenizer::new(TokenizerImpl::new(model)) } } #[pymethods] impl PyTokenizer { #[new] #[pyo3(text_signature = "(self, model)")] fn __new__(model: PyRef<PyModel>) -> Self { PyTokenizer::from_model(model.clone()) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.tokenizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Tokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.tokenizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Tokenizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { let model = PyModel::from(BPE::default()).into_py(py); PyTuple::new(py, vec![model]) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. /// /// Args: /// json (:obj:`str`): /// A valid JSON string representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(json)")] fn from_str(json: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(json.parse()).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a local JSON file representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(path)")] fn from_file(path: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. /// /// Args: /// buffer (:obj:`bytes`): /// A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(buffer)")] fn from_buffer(buffer: &PyBytes) -> PyResult<Self> { let tokenizer = serde_json::from_slice(buffer.as_bytes()).map_err(|e| { exceptions::PyValueError::new_err(format!( "Cannot instantiate Tokenizer from buffer: {}", e )) })?; Ok(Self { tokenizer }) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the /// Hugging Face Hub. 
/// /// Args: /// identifier (:obj:`str`): /// The identifier of a Model on the Hugging Face Hub, that contains /// a tokenizer.json file /// revision (:obj:`str`, defaults to `main`): /// A branch or commit id /// auth_token (:obj:`str`, `optional`, defaults to `None`): /// An optional auth token used to access private repositories on the /// Hugging Face Hub /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(signature = (identifier, revision = String::from("main"), auth_token = None))] #[pyo3(text_signature = "(identifier, revision=\"main\", auth_token=None)")] fn from_pretrained( identifier: &str, revision: String, auth_token: Option<String>, ) -> PyResult<Self> { let path = Python::with_gil(|py| -> PyResult<String> { let huggingface_hub = PyModule::import(py, intern!(py, "huggingface_hub"))?; let hf_hub_download = huggingface_hub.getattr(intern!(py, "hf_hub_download"))?; let kwargs = [ (intern!(py, "repo_id"), identifier), (intern!(py, "filename"), "tokenizer.json"), (intern!(py, "revision"), &revision), ] .into_py_dict(py); if let Some(auth_token) = auth_token { kwargs.set_item(intern!(py, "token"), auth_token)?; } let path: String = hf_hub_download.call((), Some(kwargs))?.extract()?; Ok(path) })?; let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. /// /// Args: /// pretty (:obj:`bool`, defaults to :obj:`False`): /// Whether the JSON string should be pretty formatted. /// /// Returns: /// :obj:`str`: A string representing the serialized Tokenizer #[pyo3(signature = (pretty = false))] #[pyo3(text_signature = "(self, pretty=False)")] fn to_str(&self, pretty: bool) -> PyResult<String> { ToPyResult(self.tokenizer.to_string(pretty)).into() } /// Save the :class:`~tokenizers.Tokenizer` to the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a file in which to save the serialized tokenizer. /// /// pretty (:obj:`bool`, defaults to :obj:`True`): /// Whether the JSON file should be pretty formatted. #[pyo3(signature = (path, pretty = true))] #[pyo3(text_signature = "(self, path, pretty=True)")] fn save(&self, path: &str, pretty: bool) -> PyResult<()> { ToPyResult(self.tokenizer.save(path, pretty)).into() } /// Return the number of special tokens that would be added for single/pair sentences. 
/// :param is_pair: Boolean indicating if the input would be a single sentence or a pair /// :return: #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.tokenizer .get_post_processor() .map_or(0, |p| p.added_tokens(is_pair)) } /// Get the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> { self.tokenizer.get_vocab(with_added_tokens) } /// Get the underlying vocabulary /// /// Returns: /// :obj:`Dict[int, AddedToken]`: The vocabulary #[pyo3(signature = ())] #[pyo3(text_signature = "(self)")] fn get_added_tokens_decoder(&self) -> BTreeMap<u32, PyAddedToken> { let mut sorted_map = BTreeMap::new(); for (key, value) in self.tokenizer.get_added_tokens_decoder() { sorted_map.insert(key, value.into()); } sorted_map } /// Get the size of the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`int`: The size of the vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab_size(&self, with_added_tokens: bool) -> usize { self.tokenizer.get_vocab_size(with_added_tokens) } /// Enable truncation /// /// Args: /// max_length (:obj:`int`): /// The max length at which to truncate /// /// stride (:obj:`int`, `optional`): /// The length of the previous first sequence to be included in the overflowing /// sequence /// /// strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): /// The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or /// ``only_second``. /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, **kwargs))] #[pyo3( text_signature = "(self, max_length, stride=0, strategy='longest_first', direction='right')" )] fn enable_truncation(&mut self, max_length: usize, kwargs: Option<&PyDict>) -> PyResult<()> { let mut params = TruncationParams { max_length, ..Default::default() }; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "stride" => params.stride = value.extract()?, "strategy" => { let value: &str = value.extract()?; params.strategy = match value { "longest_first" => Ok(TruncationStrategy::LongestFirst), "only_first" => Ok(TruncationStrategy::OnlyFirst), "only_second" => Ok(TruncationStrategy::OnlySecond), _ => Err(PyError(format!( "Unknown `strategy`: `{}`. Use \ one of `longest_first`, `only_first`, or `only_second`", value )) .into_pyerr::<exceptions::PyValueError>()), }? } "direction" => { let value: &str = value.extract()?; params.direction = match value { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`.", value )) .into_pyerr::<exceptions::PyValueError>()), }? 
} _ => println!("Ignored unknown kwarg option {}", key), } } } if let Err(error_message) = self.tokenizer.with_truncation(Some(params)) { return Err(PyError(error_message.to_string()).into_pyerr::<exceptions::PyValueError>()); } Ok(()) } /// Disable truncation #[pyo3(text_signature = "(self)")] fn no_truncation(&mut self) { self.tokenizer .with_truncation(None) .expect("Failed to set truncation to `None`! This should never happen"); } /// Get the currently set truncation parameters /// /// `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current truncation parameters if truncation is enabled #[getter] fn get_truncation<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_truncation().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item("max_length", params.max_length)?; dict.set_item("stride", params.stride)?; dict.set_item("strategy", params.strategy.as_ref())?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Enable the padding /// /// Args: /// direction (:obj:`str`, `optional`, defaults to :obj:`right`): /// The direction in which to pad. Can be either ``right`` or ``left`` /// /// pad_to_multiple_of (:obj:`int`, `optional`): /// If specified, the padding length should always snap to the next multiple of the /// given value. For example if we were going to pad witha length of 250 but /// ``pad_to_multiple_of=8`` then we will pad to 256. /// /// pad_id (:obj:`int`, defaults to 0): /// The id to be used when padding /// /// pad_type_id (:obj:`int`, defaults to 0): /// The type id to be used when padding /// /// pad_token (:obj:`str`, defaults to :obj:`[PAD]`): /// The pad token to be used when padding /// /// length (:obj:`int`, `optional`): /// If specified, the length at which to pad. If not specified we pad using the size of /// the longest sequence in a batch. #[pyo3(signature = (**kwargs))] #[pyo3( text_signature = "(self, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]', length=None, pad_to_multiple_of=None)" )] fn enable_padding(&mut self, kwargs: Option<&PyDict>) -> PyResult<()> { let mut params = PaddingParams::default(); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; params.direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_to_multiple_of" => { if let Some(multiple) = value.extract()? { params.pad_to_multiple_of = multiple; } } "pad_id" => params.pad_id = value.extract()?, "pad_type_id" => params.pad_type_id = value.extract()?, "pad_token" => params.pad_token = value.extract()?, "max_length" => { println!( "enable_padding(max_length=X) is deprecated, \ use enable_padding(length=X) instead" ); if let Some(l) = value.extract()? { params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } "length" => { if let Some(l) = value.extract()? 
{ params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } _ => println!("Ignored unknown kwarg option {}", key), } } } self.tokenizer.with_padding(Some(params)); Ok(()) } /// Disable padding #[pyo3(text_signature = "(self)")] fn no_padding(&mut self) { self.tokenizer.with_padding(None); } /// Get the current padding parameters /// /// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current padding parameters if padding is enabled #[getter] fn get_padding<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_padding().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item( "length", match params.strategy { tk::PaddingStrategy::BatchLongest => None, tk::PaddingStrategy::Fixed(size) => Some(size), }, )?; dict.set_item("pad_to_multiple_of", params.pad_to_multiple_of)?; dict.set_item("pad_id", params.pad_id)?; dict.set_item("pad_token", &params.pad_token)?; dict.set_item("pad_type_id", params.pad_type_id)?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Encode the given sequence and pair. This method can process raw text sequences /// as well as already pre-tokenized sequences. /// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode("A single sequence")` /// encode("A sequence", "And its pair")` /// encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` /// encode( /// [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], /// is_pretokenized=True /// ) /// /// Args: /// sequence (:obj:`~tokenizers.InputSequence`): /// The main input sequence we want to encode. This sequence can be either raw /// text or pre-tokenized, according to the ``is_pretokenized`` argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` /// /// pair (:obj:`~tokenizers.InputSequence`, `optional`): /// An optional input sequence. The expected format is the same that for ``sequence``. /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The encoded result /// #[pyo3(signature = (sequence, pair = None, is_pretokenized = false, add_special_tokens = true))] #[pyo3( text_signature = "(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True)" )] fn encode( &self, sequence: &PyAny, pair: Option<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let sequence: tk::InputSequence = if is_pretokenized { sequence.extract::<PreTokenizedInputSequence>()?.into() } else { sequence.extract::<TextInputSequence>()?.into() }; let input = match pair { Some(pair) => { let pair: tk::InputSequence = if is_pretokenized { pair.extract::<PreTokenizedInputSequence>()?.into() } else { pair.extract::<TextInputSequence>()?.into() }; tk::EncodeInput::Dual(sequence, pair) } None => tk::EncodeInput::Single(sequence), }; ToPyResult( self.tokenizer .encode_char_offsets(input, add_special_tokens) .map(|e| e.into()), ) .into() } /// Encode the given batch of inputs. This method accept both raw text sequences /// as well as already pre-tokenized sequences. 
/// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode_batch([ /// "A single sequence", /// ("A tuple with a sequence", "And its pair"), /// [ "A", "pre", "tokenized", "sequence" ], /// ([ "A", "pre", "tokenized", "sequence" ], "And its pair") /// ]) /// /// Args: /// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): /// A list of single sequences or pair sequences to encode. Each sequence /// can be either raw text or pre-tokenized, according to the ``is_pretokenized`` /// argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch /// #[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))] #[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")] fn encode_batch( &self, py: Python<'_>, input: Vec<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<Vec<PyEncoding>> { let input: Vec<tk::EncodeInput> = input .into_iter() .map(|o| { let input: tk::EncodeInput = if is_pretokenized { o.extract::<PreTokenizedEncodeInput>()?.into() } else { o.extract::<TextEncodeInput>()?.into() }; Ok(input) }) .collect::<PyResult<Vec<tk::EncodeInput>>>()?; py.allow_threads(|| { ToPyResult( self.tokenizer .encode_batch_char_offsets(input, add_special_tokens) .map(|encodings| encodings.into_iter().map(|e| e.into()).collect()), ) .into() }) } /// Decode the given list of ids back to a string /// /// This is used to decode anything coming back from a Language Model /// /// Args: /// ids (A :obj:`List/Tuple` of :obj:`int`): /// The list of ids that we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded string /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(signature = (ids, skip_special_tokens = true))] #[pyo3(text_signature = "(self, ids, skip_special_tokens=True)")] fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> PyResult<String> { ToPyResult(self.tokenizer.decode(&ids, skip_special_tokens)).into() } /// Decode a batch of ids back to their corresponding string /// /// Args: /// sequences (:obj:`List` of :obj:`List[int]`): /// The batch of sequences we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded strings /// /// Returns: /// :obj:`List[str]`: A list of decoded strings #[pyo3(signature = (sequences, skip_special_tokens = true))] #[pyo3(text_signature = "(self, sequences, skip_special_tokens=True)")] fn decode_batch( &self, py: Python<'_>, sequences: Vec<Vec<u32>>, skip_special_tokens: bool, ) -> PyResult<Vec<String>> { py.allow_threads(|| { let slices = sequences.iter().map(|v| &v[..]).collect::<Vec<&[u32]>>(); ToPyResult(self.tokenizer.decode_batch(&slices, skip_special_tokens)).into() }) } /// Convert the given token to its corresponding id if it exists /// /// Args: /// token (:obj:`str`): /// The token to convert /// /// Returns: /// :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, token)")] fn 
token_to_id(&self, token: &str) -> Option<u32> { self.tokenizer.token_to_id(token) } /// Convert the given id to its corresponding token if it exists /// /// Args: /// id (:obj:`int`): /// The id to convert /// /// Returns: /// :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.tokenizer.id_to_token(id) } /// Modifies the tokenizer in order to use or not the special tokens /// during encoding. /// /// Args: /// value (:obj:`bool`): /// Whether to use the special tokens or not /// #[setter] fn set_encode_special_tokens(&mut self, value: bool) { self.tokenizer.set_encode_special_tokens(value); } /// Get the value of the `encode_special_tokens` attribute /// /// Returns: /// :obj:`bool`: the tokenizer's encode_special_tokens attribute #[getter] fn get_encode_special_tokens(&self) -> bool { self.tokenizer.get_encode_special_tokens() } /// Add the given tokens to the vocabulary /// /// The given tokens are added only if they don't already exist in the vocabulary. /// Each token then gets a new attributed id. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of tokens we want to add to the vocabulary. Each token can be either a /// string or an instance of :class:`~tokenizers.AddedToken` for more customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(false)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = false; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_tokens(&tokens)) } /// Add the given special tokens to the Tokenizer. /// /// If these tokens are already part of the vocabulary, it just let the Tokenizer know about /// them. If they don't exist, the Tokenizer creates them, giving them a new id. /// /// These special tokens will never be processed by the model (ie won't be split into /// multiple tokens), and they can be removed from the output when decoding. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of special tokens we want to add to the vocabulary. Each token can either /// be a string or an instance of :class:`~tokenizers.AddedToken` for more /// customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_special_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_special_tokens(&tokens)) } /// Train the Tokenizer using the given files. /// /// Reads the files line by line, while keeping all the whitespace, even new lines. 
/// If you want to train from data store in-memory, you can check /// :meth:`~tokenizers.Tokenizer.train_from_iterator` /// /// Args: /// files (:obj:`List[str]`): /// A list of path to the files that we should use for training /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model #[pyo3(signature = (files, trainer = None))] #[pyo3(text_signature = "(self, files, trainer = None)")] fn train(&mut self, files: Vec<String>, trainer: Option<&mut PyTrainer>) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); Python::with_gil(|py| { py.allow_threads(|| { ToPyResult( self.tokenizer .train_from_files(&mut trainer, files) .map(|_| {}), ) .into() }) }) } /// Train the Tokenizer using the provided iterator. /// /// You can provide anything that is a Python Iterator /// /// * A list of sequences :obj:`List[str]` /// * A generator that yields :obj:`str` or :obj:`List[str]` /// * A Numpy array of strings /// * ... /// /// Args: /// iterator (:obj:`Iterator`): /// Any iterator over strings or list of strings /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model /// /// length (:obj:`int`, `optional`): /// The total number of sequences in the iterator. This is used to /// provide meaningful progress tracking #[pyo3(signature = (iterator, trainer = None, length = None))] #[pyo3(text_signature = "(self, iterator, trainer=None, length=None)")] fn train_from_iterator( &mut self, py: Python, iterator: &PyAny, trainer: Option<&mut PyTrainer>, length: Option<usize>, ) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); let buffered_iter = PyBufferedIterator::new( iterator, |element| { // Each element of the iterator can either be: // - An iterator, to allow batching // - A string if let Ok(s) = element.downcast::<PyString>() { itertools::Either::Right(std::iter::once(s.to_str().map(|s| s.to_owned()))) } else { match element.iter() { Ok(iter) => itertools::Either::Left( iter.map(|i| i?.extract::<String>()) .collect::<Vec<_>>() .into_iter(), ), Err(e) => itertools::Either::Right(std::iter::once(Err(e))), } } }, 256, )?; py.allow_threads(|| { ResultShunt::process(buffered_iter, |iter| { self.tokenizer .train(&mut trainer, MaybeSizedIterator::new(iter, length)) .map(|_| {}) .map_err(|e| exceptions::PyException::new_err(e.to_string())) })? }) } /// Apply all the post-processing steps to the given encodings. /// /// The various steps are: /// /// 1. Truncate according to the set truncation params (provided with /// :meth:`~tokenizers.Tokenizer.enable_truncation`) /// 2. Apply the :class:`~tokenizers.processors.PostProcessor` /// 3. Pad according to the set padding params (provided with /// :meth:`~tokenizers.Tokenizer.enable_padding`) /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The :class:`~tokenizers.Encoding` corresponding to the main sequence. /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. 
/// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The final post-processed encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn post_process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { ToPyResult( self.tokenizer .post_process( encoding.encoding.clone(), pair.map(|p| p.encoding.clone()), add_special_tokens, ) .map(|e| e.into()), ) .into() } /// The :class:`~tokenizers.models.Model` in use by the Tokenizer #[getter] fn get_model(&self, py: Python<'_>) -> PyResult<PyObject> { self.tokenizer.get_model().get_as_subtype(py) } /// Set the :class:`~tokenizers.models.Model` #[setter] fn set_model(&mut self, model: PyRef<PyModel>) { self.tokenizer.with_model(model.clone()); } /// The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer #[getter] fn get_normalizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_normalizer() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_normalizer(&mut self, normalizer: PyRef<PyNormalizer>) { self.tokenizer.with_normalizer(normalizer.clone()); } /// The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer #[getter] fn get_pre_tokenizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(pt) = self.tokenizer.get_pre_tokenizer() { pt.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_pre_tokenizer(&mut self, pretok: PyRef<PyPreTokenizer>) { self.tokenizer.with_pre_tokenizer(pretok.clone()); } /// The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer #[getter] fn get_post_processor(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_post_processor() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.processors.PostProcessor` #[setter] fn set_post_processor(&mut self, processor: PyRef<PyPostProcessor>) { self.tokenizer.with_post_processor(processor.clone()); } /// The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer #[getter] fn get_decoder(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(dec) = self.tokenizer.get_decoder() { dec.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.decoders.Decoder` #[setter] fn set_decoder(&mut self, decoder: PyRef<PyDecoder>) { self.tokenizer.with_decoder(decoder.clone()); } } #[cfg(test)] mod test { use super::*; use crate::models::PyModel; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper}; use std::sync::{Arc, RwLock}; use tempfile::NamedTempFile; use tk::normalizers::{Lowercase, NFKC}; #[test] fn serialize() { let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default())); tokenizer.with_normalizer(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(vec![ Arc::new(RwLock::new(NFKC.into())), Arc::new(RwLock::new(Lowercase.into())), ]))); let tmp = NamedTempFile::new().unwrap().into_temp_path(); tokenizer.save(&tmp, false).unwrap(); Tokenizer::from_file(&tmp).unwrap(); } }
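A short sketch of how these bindings are typically driven from Python — added tokens, truncation, padding, and batch encoding. The toy corpus and settings are assumptions made up for illustration:

```python
from tokenizers import AddedToken, Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.train_from_iterator(
    ["hello world", "hello tokenizers", "world of tokenizers"],
    trainer=BpeTrainer(special_tokens=["[UNK]", "[PAD]"]),
)

# AddedToken options map onto the PyAddedToken kwargs defined above
tokenizer.add_tokens([AddedToken("tokenizers", single_word=True)])

# Truncation and padding correspond to enable_truncation / enable_padding
tokenizer.enable_truncation(max_length=8)
tokenizer.enable_padding(pad_token="[PAD]", pad_id=tokenizer.token_to_id("[PAD]"))

encodings = tokenizer.encode_batch(["hello world", "hello"])
print([e.ids for e in encodings])
print([e.attention_mask for e in encodings])
```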
tokenizers/bindings/python/src/tokenizer.rs/0
{ "file_path": "tokenizers/bindings/python/src/tokenizer.rs", "repo_id": "tokenizers", "token_count": 26008 }
207
import json import pickle import pytest from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer from tokenizers.processors import ( BertProcessing, ByteLevel, PostProcessor, RobertaProcessing, Sequence, TemplateProcessing, ) from ..utils import data_dir, roberta_files class TestBertProcessing: def test_instantiate(self): processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) assert processor is not None assert isinstance(processor, PostProcessor) assert isinstance(processor, BertProcessing) assert isinstance( pickle.loads(pickle.dumps(BertProcessing(("[SEP]", 0), ("[CLS]", 1)))), BertProcessing, ) def test_processing(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) output = tokenizer.encode("my name", "pair") assert output.tokens == ["[CLS]", "my", "name", "[SEP]", "pair", "[SEP]"] assert output.ids == [1, 2, 3, 0, 6, 0] class TestRobertaProcessing: def test_instantiate(self): processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) assert processor is not None assert isinstance(processor, PostProcessor) assert isinstance(processor, RobertaProcessing) assert isinstance( pickle.loads(pickle.dumps(RobertaProcessing(("</s>", 1), ("<s>", 0)))), RobertaProcessing, ) def test_processing(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) output = tokenizer.encode("my name", "pair") assert output.tokens == ["<s>", "my", "name", "</s>", "</s>", "pair", "</s>"] assert output.ids == [0, 2, 3, 1, 1, 6, 1] class TestByteLevelProcessing: def test_instantiate(self): assert ByteLevel() is not None assert ByteLevel(trim_offsets=True) is not None assert isinstance(ByteLevel(), PostProcessor) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_processing(self, roberta_files): # Deprecated in 0.9 with pytest.deprecated_call(): tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"])) tokenizer.pre_tokenizer = ByteLevelPreTokenizer(add_prefix_space=True) # Keeps original offsets output = tokenizer.encode("My name is John") assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] assert output.offsets == [(0, 2), (2, 7), (7, 10), (10, 15)] # Trims offsets when activated tokenizer.post_processor = ByteLevel(trim_offsets=True) output = tokenizer.encode("My name is John") assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15)] def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestTemplateProcessing: def get_bert(self): return TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]", "$A", "[SEP]", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) def get_roberta(self): return TemplateProcessing( single="<s> $0 </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[("<s>", 0), ("</s>", 1)], ) def get_t5_squad(self): # >>> from transformers import AutoTokenizer # >>> tok = AutoTokenizer.from_pretrained("t5-small") # >>> tok.tokenize("question: ") # ['▁question', ':'] # >>> tok.tokenize("context: ") # ['▁context', ':'] # 
>>> tok.encode("context: ") # [2625, 10] # >>> tok.encode("question: ") # [822, 10] return TemplateProcessing( single=["$0"], pair=["Q", "$A", "C", "$B"], special_tokens=[ { "id": "Q", "ids": [2625, 10], "tokens": ["_question", ":"], }, { "id": "C", "ids": [822, 10], "tokens": ["_context", ":"], }, ], ) def test_instantiate(self): bert = self.get_bert() assert bert is not None assert isinstance(bert, PostProcessor) assert isinstance(bert, TemplateProcessing) assert isinstance(pickle.loads(pickle.dumps(bert)), TemplateProcessing) # It is absolutely legal to have tokens with spaces in the name: processor = TemplateProcessing( single=["[ C L S ]", "Token with space"], special_tokens=[("[ C L S ]", 0), ("Token with space", 1)], ) # Sequence identifiers must be well formed: with pytest.raises(Exception, match="Cannot build Piece"): processor = TemplateProcessing(single="[CLS] $$ [SEP]") with pytest.raises(Exception, match="Cannot build Piece"): processor = TemplateProcessing(single="[CLS] $A: [SEP]") # Special tokens must be provided when used in template: with pytest.raises(Exception, match="Missing SpecialToken\\(s\\) with id\\(s\\)"): processor = TemplateProcessing(single=["[CLS]"]) def test_bert_parity(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) original = tokenizer.encode("my name", "pair") tokenizer.post_processor = self.get_bert() template = tokenizer.encode("my name", "pair") assert original.ids == template.ids def test_roberta_parity(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) original = tokenizer.encode("my name is john", "pair") tokenizer.post_processor = self.get_roberta() template = tokenizer.encode("my name is john", "pair") assert original.ids == template.ids class TestSequenceProcessing: def test_sequence_processing(self): assert Sequence([]) is not None assert Sequence([ByteLevel()]) is not None assert isinstance(Sequence([]), PostProcessor) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_post_process(self): byte_level = ByteLevel(trim_offsets=True) template = TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "Ġjohn", "pair"]) tokenizer.post_processor = template # Before the sequence original = tokenizer.encode("my name is Ġjohn") assert original.ids == [1, 2, 3, 4, 5, 0] assert original.type_ids == [0, 0, 0, 0, 0, 0] assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0)] pair = tokenizer.encode("my name is Ġjohn", "pair") # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0] assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1] assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0), (0, 4), (0, 0)] processor = Sequence([byte_level, template]) tokenizer.post_processor = processor original = tokenizer.encode("my name is Ġjohn") assert original.ids == [1, 2, 3, 4, 5, 0] assert original.type_ids == [0, 0, 0, 0, 0, 0] # Offsets ARE trimmed assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 
16), (0, 0)] pair = tokenizer.encode("my name is Ġjohn", "pair") # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0] assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1] assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0), (0, 4), (0, 0)]
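Outside of the test suite, a `TemplateProcessing` post-processor is wired up roughly like this; the tiny word-level vocabulary below is a made-up example:

```python
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing

vocab = {"[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "hello": 3, "world": 4}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

print(tokenizer.encode("hello world").tokens)       # ['[CLS]', 'hello', 'world', '[SEP]']
print(tokenizer.encode("hello", "world").type_ids)  # [0, 0, 0, 1, 1]
```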
tokenizers/bindings/python/tests/bindings/test_processors.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_processors.py", "repo_id": "tokenizers", "token_count": 4132 }
208
## Requirements

In order to generate the documentation, it is necessary to have a Python environment with the following:
```bash
pip install sphinx sphinx_rtd_theme setuptools_rust
```

It is also necessary to have the `tokenizers` library in this same environment, for Sphinx to generate all the API Reference and links properly. If you want to visualize the documentation with some modifications made to the Python bindings, make sure you build it from source.

## Building the documentation

Once everything is set up, you can build the documentation automatically for all the languages using the following command in the `/docs` folder:

```bash
make html_all
```

If you want to build only for a specific language, you can use:

```bash
make html O="-t python"
```

(Replace `python` with the target language among `rust`, `node`, and `python`.)

**NOTE**

If you are making any structural change to the documentation, it is recommended to clean the build directory before rebuilding:

```bash
make clean && make html_all
```
tokenizers/docs/README.md/0
{ "file_path": "tokenizers/docs/README.md", "repo_id": "tokenizers", "token_count": 266 }
209
# Installation

<tokenizerslangcontent>
<python>
🤗 Tokenizers is tested on Python 3.5+.

You should install 🤗 Tokenizers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going to use and activate it.

## Installation with pip

🤗 Tokenizers can be installed using pip as follows:

```bash
pip install tokenizers
```

## Installation from sources

To use this method, you need to have the Rust language installed. You can follow [the official guide](https://www.rust-lang.org/learn/get-started) for more information.

If you are using a Unix-based OS, the installation should be as simple as running:

```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```

Or you can easily update an existing installation with the following command:

```bash
rustup update
```

Once Rust is installed, we can start retrieving the sources for 🤗 Tokenizers:

```bash
git clone https://github.com/huggingface/tokenizers
```

Then we go into the python bindings folder:

```bash
cd tokenizers/bindings/python
```

At this point you should have your [virtual environment](https://docs.python.org/3/library/venv.html) already activated. In order to compile 🤗 Tokenizers, run:

```bash
pip install -e .
```
</python>
<rust>
## Crates.io

🤗 Tokenizers is available on [crates.io](https://crates.io/crates/tokenizers).

You just need to add it to your `Cargo.toml`:

```bash
cargo add tokenizers
```
</rust>
<node>
## Installation with npm

You can simply install 🤗 Tokenizers with npm using:

```bash
npm install tokenizers
```
</node>
</tokenizerslangcontent>
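Whichever installation path you choose for the Python package, a quick sanity check could look like the following. It assumes network access and the `huggingface_hub` package, since it pulls the `bert-base-uncased` tokenizer from the Hub:

```python
from tokenizers import Tokenizer

# Downloads tokenizer.json for the given model from the Hugging Face Hub
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
print(tokenizer.encode("Hello, y'all!").tokens)
```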
tokenizers/docs/source-doc-builder/installation.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/installation.mdx", "repo_id": "tokenizers", "token_count": 554 }
210