Dataset columns:
  text: string (7 to 318k characters)
  id: string (14 to 166 characters)
  metadata: dict
  __index_level_0__: int64 (0 to 439)
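Since every row below follows this schema, here is a hedged sketch of reading the rows with the `datasets` library. The repo id "org/code-dataset" and the "train" split are placeholders, not names given in this preview.

```python
# Hedged sketch: iterate over rows with the schema above using the `datasets`
# library. The repo id "org/code-dataset" and the "train" split are placeholders;
# the actual dataset name is not given in this preview.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train")  # placeholder repo id and split
row = ds[0]
print(row["id"])                       # e.g. "diffusers/src/diffusers/utils/deprecation_utils.py/0"
print(row["metadata"]["token_count"])  # per-file token count stored in the metadata dict
print(row["text"][:200])               # first 200 characters of the source file
```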
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
id: diffusers/src/diffusers/utils/deprecation_utils.py/0
metadata: { "file_path": "diffusers/src/diffusers/utils/deprecation_utils.py", "repo_id": "diffusers", "token_count": 793 }
__index_level_0__: 127
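A minimal usage sketch for the `deprecate` helper in the row above. The wrapper name `generate`, the keyword `legacy_arg`, and the removal version "0.99.0" are illustrative placeholders; the removal version only needs to be ahead of the installed diffusers release, otherwise the helper raises a `ValueError` instead of warning.

```python
# Minimal usage sketch for the `deprecate` helper defined above. `generate`,
# `legacy_arg`, and the removal version "0.99.0" are illustrative placeholders.
from diffusers.utils import deprecate

def generate(prompt, **kwargs):
    # Pop the renamed keyword (if passed) and emit a FutureWarning; `deprecate`
    # returns the popped value, or None when the caller did not pass it.
    legacy_arg = deprecate("legacy_arg", "0.99.0", "Use `prompt` instead.", take_from=kwargs)
    if legacy_arg is not None:
        prompt = legacy_arg
    return prompt

print(generate("a photo of a cat"))                                    # no warning
print(generate("a photo of a cat", legacy_arg="an old-style prompt"))  # emits FutureWarning
```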
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuid4 from huggingface_hub import ( ModelCard, ModelCardData, create_repo, hf_hub_download, upload_folder, ) from huggingface_hub.constants import HF_HUB_CACHE, HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, validate_hf_hub_args, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger logger = get_logger(__name__) SESSION_ID = uuid4().hex def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_flax_available(): ua += f"; jax/{_jax_version}" ua += f"; flax/{_flax_version}" if is_onnx_available(): ua += f"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua def load_or_create_model_card( repo_id_or_path: Optional[str] = None, token: Optional[str] = None, is_pipeline: bool = False ) -> ModelCard: """ Loads or creates a model card. Args: repo_id (`str`): The repo_id where to look for the model card. token (`str`, *optional*): Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more details. is_pipeline (`bool`, *optional*): Boolean to indicate if we're adding tag to a [`DiffusionPipeline`]. """ if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) try: # Check if the model card is present on the remote repo model_card = ModelCard.load(repo_id_or_path, token=token) except EntryNotFoundError: # Otherwise create a simple model card from template component = "pipeline" if is_pipeline else "model" model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated." card_data = ModelCardData() model_card = ModelCard.from_template(card_data, model_description=model_description) return model_card def populate_model_card(model_card: ModelCard) -> ModelCard: """Populates the `model_card` with library name.""" if model_card.data.library_name is None: model_card.data.library_name = "diffusers" return model_card def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): """ Extracts the commit hash from a resolved filename toward a cache file. """ if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. hf_cache_home = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) old_diffusers_cache = os.path.join(hf_cache_home, "diffusers") def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None: if new_cache_dir is None: new_cache_dir = HF_HUB_CACHE if old_cache_dir is None: old_cache_dir = old_diffusers_cache old_cache_dir = Path(old_cache_dir).expanduser() new_cache_dir = Path(new_cache_dir).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*"): if old_blob_path.is_file() and not old_blob_path.is_symlink(): new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir) new_blob_path.parent.mkdir(parents=True, exist_ok=True) os.replace(old_blob_path, new_blob_path) try: os.symlink(new_blob_path, old_blob_path) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). cache_version_file = os.path.join(HF_HUB_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: try: cache_version = int(f.read()) except ValueError: cache_version = 0 if cache_version < 1: old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." 
) try: move_cache() except Exception as e: trace = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(HF_HUB_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f"There was a problem when trying to write in your cache folder ({HF_HUB_CACHE}). Please, ensure " "the directory exists and can be written to." ) def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: if variant is not None: splits = weights_name.split(".") splits = splits[:-1] + [variant] + splits[-1:] weights_name = ".".join(splits) return weights_name @validate_hf_hub_args def _get_model_file( pretrained_model_name_or_path: Union[str, Path], *, weights_name: str, subfolder: Optional[str] = None, cache_dir: Optional[str] = None, force_download: bool = False, proxies: Optional[Dict] = None, resume_download: bool = False, local_files_only: bool = False, token: Optional[str] = None, user_agent: Optional[Union[Dict, str]] = None, revision: Optional[str] = None, commit_hash: Optional[str] = None, ): pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): return pretrained_model_name_or_path elif os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): # Load from a PyTorch checkpoint model_file = os.path.join(pretrained_model_name_or_path, weights_name) return model_file elif subfolder is not None and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, weights_name) ): model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) return model_file else: raise EnvironmentError( f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0") ): try: model_file = hf_hub_download( pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, ) warnings.warn( f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, ) return model_file except: # noqa: E722 warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, ) try: # 2. Load model file as usual model_file = hf_hub_download( pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {weights_name} or" " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {weights_name}" ) class PushToHubMixin: """ A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub. """ def _upload_folder( self, working_dir: Union[str, os.PathLike], repo_id: str, token: Optional[str] = None, commit_message: Optional[str] = None, create_pr: bool = False, ): """ Uploads all files in `working_dir` to `repo_id`. """ if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Scheduler" in self.__class__.__name__: commit_message = "Upload scheduler" else: commit_message = f"Upload {self.__class__.__name__}" logger.info(f"Uploading the files of {working_dir} to {repo_id}.") return upload_folder( repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr ) def push_to_hub( self, repo_id: str, commit_message: Optional[str] = None, private: Optional[bool] = None, token: Optional[str] = None, create_pr: bool = False, safe_serialization: bool = True, variant: Optional[str] = None, ) -> str: """ Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub. 
Parameters: repo_id (`str`): The name of the repository you want to push your model, scheduler, or pipeline files to. It should contain your organization name when pushing to an organization. `repo_id` can also be a path to a local directory. commit_message (`str`, *optional*): Message to commit while pushing. Default to `"Upload {object}"`. private (`bool`, *optional*): Whether or not the repository created should be private. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. The token generated when running `huggingface-cli login` (stored in `~/.huggingface`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights to the `safetensors` format. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. Examples: ```python from diffusers import UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet") # Push the `unet` to your namespace with the name "my-finetuned-unet". unet.push_to_hub("my-finetuned-unet") # Push the `unet` to an organization with the name "my-finetuned-unet". unet.push_to_hub("your-org/my-finetuned-unet") ``` """ repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id # Create a new empty model card and eventually tag it model_card = load_or_create_model_card(repo_id, token=token) model_card = populate_model_card(model_card) # Save all files. save_kwargs = {"safe_serialization": safe_serialization} if "Scheduler" not in self.__class__.__name__: save_kwargs.update({"variant": variant}) with tempfile.TemporaryDirectory() as tmpdir: self.save_pretrained(tmpdir, **save_kwargs) # Update model card if needed: model_card.save(os.path.join(tmpdir, "README.md")) return self._upload_folder( tmpdir, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, )
id: diffusers/src/diffusers/utils/hub_utils.py/0
metadata: { "file_path": "diffusers/src/diffusers/utils/hub_utils.py", "repo_id": "diffusers", "token_count": 8141 }
__index_level_0__: 128
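The `hub_utils` row above defines two small pure-string helpers, `_add_variant` and `extract_commit_hash`. The sketch below illustrates their behavior; both are private utilities, so importing them directly from `diffusers.utils.hub_utils` is shown for demonstration only, and the cache path and hash are made-up examples.

```python
# Illustration of the two pure-string helpers from the hub_utils module above.
# Importing private helpers directly is for demonstration only; the cache path
# and hash below are made-up examples.
from diffusers.utils.hub_utils import _add_variant, extract_commit_hash

# `_add_variant` splices the variant name in front of the file extension.
print(_add_variant("diffusion_pytorch_model.bin", "fp16"))  # diffusion_pytorch_model.fp16.bin
print(_add_variant("diffusion_pytorch_model.bin"))          # unchanged when variant is None

# `extract_commit_hash` pulls the 40-character snapshot hash out of a resolved cache path.
fake_hash = "0123456789abcdef0123456789abcdef01234567"
resolved = f"/home/user/.cache/huggingface/hub/models--org--repo/snapshots/{fake_hash}/unet/config.json"
print(extract_commit_hash(resolved))  # prints the hash, since it matches the commit-hash regex
```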
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import os import random import tempfile import time import unittest import numpy as np import torch import torch.nn as nn from huggingface_hub.repocard import RepoCard from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, DiffusionPipeline, EulerDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, UNet3DConditionModel, ) from diffusers.loaders import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin from diffusers.models.attention_processor import ( Attention, AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor, ) from diffusers.models.lora import LoRALinearLayer from diffusers.training_utils import unet_lora_state_dict from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( deprecate_after_peft_backend, floats_tensor, load_image, nightly, require_torch_gpu, slow, torch_device, ) def text_encoder_attn_modules(text_encoder: nn.Module): """Fetches the attention modules from `text_encoder`.""" attn_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for i, layer in enumerate(text_encoder.text_model.encoder.layers): name = f"text_model.encoder.layers.{i}.self_attn" mod = layer.self_attn attn_modules.append((name, mod)) else: raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") return attn_modules def text_encoder_lora_state_dict(text_encoder: nn.Module): """Returns the LoRA state dict of the `text_encoder`. Assumes that `_modify_text_encoder()` was already called on it.""" state_dict = {} for name, module in text_encoder_attn_modules(text_encoder): for k, v in module.q_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v for k, v in module.k_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v for k, v in module.v_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v for k, v in module.out_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v return state_dict def create_unet_lora_layers(unet: nn.Module, rank=4, mock_weights=True): """Creates and returns the LoRA state dict for the UNet.""" # So that we accidentally don't end up using the in-place modified UNet. unet_lora_parameters = [] for attn_processor_name, attn_processor in unet.attn_processors.items(): # Parse the attention module. attn_module = unet for n in attn_processor_name.split(".")[:-1]: attn_module = getattr(attn_module, n) # Set the `lora_layer` attribute of the attention-related matrices. 
attn_module.to_q.set_lora_layer( LoRALinearLayer( in_features=attn_module.to_q.in_features, out_features=attn_module.to_q.out_features, rank=rank, ) ) attn_module.to_k.set_lora_layer( LoRALinearLayer( in_features=attn_module.to_k.in_features, out_features=attn_module.to_k.out_features, rank=rank, ) ) attn_module.to_v.set_lora_layer( LoRALinearLayer( in_features=attn_module.to_v.in_features, out_features=attn_module.to_v.out_features, rank=rank, ) ) attn_module.to_out[0].set_lora_layer( LoRALinearLayer( in_features=attn_module.to_out[0].in_features, out_features=attn_module.to_out[0].out_features, rank=rank, ) ) if mock_weights: with torch.no_grad(): attn_module.to_q.lora_layer.up.weight += 1 attn_module.to_k.lora_layer.up.weight += 1 attn_module.to_v.lora_layer.up.weight += 1 attn_module.to_out[0].lora_layer.up.weight += 1 unet_lora_parameters.extend(attn_module.to_q.lora_layer.parameters()) unet_lora_parameters.extend(attn_module.to_k.lora_layer.parameters()) unet_lora_parameters.extend(attn_module.to_v.lora_layer.parameters()) unet_lora_parameters.extend(attn_module.to_out[0].lora_layer.parameters()) unet_lora_sd = unet_lora_state_dict(unet) # Unload LoRA. unet.unload_lora() return unet_lora_parameters, unet_lora_sd def create_3d_unet_lora_layers(unet: nn.Module, rank=4, mock_weights=True): """Creates and returns the LoRA state dict for the 3D UNet.""" for attn_processor_name in unet.attn_processors.keys(): has_cross_attention = attn_processor_name.endswith("attn2.processor") and not ( attn_processor_name.startswith("transformer_in") or "temp_attentions" in attn_processor_name.split(".") ) cross_attention_dim = unet.config.cross_attention_dim if has_cross_attention else None if attn_processor_name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif attn_processor_name.startswith("up_blocks"): block_id = int(attn_processor_name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif attn_processor_name.startswith("down_blocks"): block_id = int(attn_processor_name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] elif attn_processor_name.startswith("transformer_in"): # Note that the `8 * ...` comes from: https://github.com/huggingface/diffusers/blob/7139f0e874f10b2463caa8cbd585762a309d12d6/src/diffusers/models/unet_3d_condition.py#L148 hidden_size = 8 * unet.config.attention_head_dim # Parse the attention module. 
attn_module = unet for n in attn_processor_name.split(".")[:-1]: attn_module = getattr(attn_module, n) attn_module.to_q.set_lora_layer( LoRALinearLayer( in_features=min(attn_module.to_q.in_features, hidden_size), out_features=attn_module.to_q.out_features if cross_attention_dim is None else max(attn_module.to_q.out_features, cross_attention_dim), rank=rank, ) ) attn_module.to_k.set_lora_layer( LoRALinearLayer( in_features=min(attn_module.to_k.in_features, hidden_size), out_features=attn_module.to_k.out_features if cross_attention_dim is None else max(attn_module.to_k.out_features, cross_attention_dim), rank=rank, ) ) attn_module.to_v.set_lora_layer( LoRALinearLayer( in_features=min(attn_module.to_v.in_features, hidden_size), out_features=attn_module.to_v.out_features if cross_attention_dim is None else max(attn_module.to_v.out_features, cross_attention_dim), rank=rank, ) ) attn_module.to_out[0].set_lora_layer( LoRALinearLayer( in_features=min(attn_module.to_out[0].in_features, hidden_size), out_features=attn_module.to_out[0].out_features if cross_attention_dim is None else max(attn_module.to_out[0].out_features, cross_attention_dim), rank=rank, ) ) if mock_weights: with torch.no_grad(): attn_module.to_q.lora_layer.up.weight += 1 attn_module.to_k.lora_layer.up.weight += 1 attn_module.to_v.lora_layer.up.weight += 1 attn_module.to_out[0].lora_layer.up.weight += 1 unet_lora_sd = unet_lora_state_dict(unet) # Unload LoRA. unet.unload_lora() return unet_lora_sd def set_lora_weights(lora_attn_parameters, randn_weight=False, var=1.0): """Randomizes the LoRA params if specified.""" if not isinstance(lora_attn_parameters, dict): with torch.no_grad(): for parameter in lora_attn_parameters: if randn_weight: parameter[:] = torch.randn_like(parameter) * var else: torch.zero_(parameter) else: if randn_weight: modified_state_dict = {k: torch.rand_like(v) * var for k, v in lora_attn_parameters.items()} else: modified_state_dict = {k: torch.zeros_like(v) * var for k, v in lora_attn_parameters.items()} return modified_state_dict def state_dicts_almost_equal(sd1, sd2): sd1 = dict(sorted(sd1.items())) sd2 = dict(sorted(sd2.items())) models_are_equal = True for ten1, ten2 in zip(sd1.values(), sd2.values()): if (ten1 - ten2).abs().max() > 1e-3: models_are_equal = False return models_are_equal @deprecate_after_peft_backend class LoraLoaderMixinTests(unittest.TestCase): lora_rank = 4 def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") unet_lora_raw_params, unet_lora_params = create_unet_lora_layers(unet, rank=self.lora_rank) 
text_encoder_lora_params = LoraLoaderMixin._modify_text_encoder( text_encoder, dtype=torch.float32, rank=self.lora_rank ) text_encoder_lora_params = text_encoder_lora_state_dict(text_encoder) # We call this to ensure that the effects of the in-place `_modify_text_encoder` have been erased. LoraLoaderMixin._remove_text_encoder_monkey_patch_classmethod(text_encoder) pipeline_components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } lora_components = { "unet_lora_raw_params": unet_lora_raw_params, "unet_lora_params": unet_lora_params, "text_encoder_lora_params": text_encoder_lora_params, } return pipeline_components, lora_components def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 10 num_channels = 4 sizes = (32, 32) generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "A painting of a squirrel eating a burger", "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs # copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb def get_dummy_tokens(self): max_seq_length = 77 inputs = torch.randint(2, 56, size=(1, max_seq_length), generator=torch.manual_seed(0)) prepared_inputs = {} prepared_inputs["input_ids"] = inputs return prepared_inputs def create_lora_weight_file(self, tmpdirname): _, lora_components = self.get_dummy_components() LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) @unittest.skipIf(not torch.cuda.is_available() or not is_xformers_available(), reason="xformers requires cuda") def test_stable_diffusion_xformers_attn_processors(self): # disable_full_determinism() device = "cuda" # ensure determinism for the device-dependent torch.Generator components, _ = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs() # run xformers attention sd_pipe.enable_xformers_memory_efficient_attention() image = sd_pipe(**inputs).images assert image.shape == (1, 64, 64, 3) def test_stable_diffusion_lora(self): components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.unet.set_default_attn_processor() # forward 1 _, _, inputs = self.get_dummy_inputs() output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] # set lora layers sd_pipe.unet.load_attn_procs(lora_components["unet_lora_params"]) # forward 2 _, _, inputs = self.get_dummy_inputs() output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.0}) image = output.images image_slice_1 = image[0, -3:, -3:, -1] # forward 3 _, _, inputs = self.get_dummy_inputs() output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.5}) image = output.images image_slice_2 = image[0, -3:, -3:, -1] assert np.abs(image_slice 
- image_slice_1).max() < 1e-2 assert np.abs(image_slice - image_slice_2).max() > 1e-2 def test_lora_save_load(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs() original_images = sd_pipe(**pipeline_inputs).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Outputs shouldn't match. self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice))) def test_lora_save_load_no_safe_serialization(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs() original_images = sd_pipe(**pipeline_inputs).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], safe_serialization=False, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Outputs shouldn't match. 
self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice))) def test_text_encoder_lora_monkey_patch(self): pipeline_components, _ = self.get_dummy_components() pipe = StableDiffusionPipeline(**pipeline_components) dummy_tokens = self.get_dummy_tokens() # inference without lora outputs_without_lora = pipe.text_encoder(**dummy_tokens)[0] assert outputs_without_lora.shape == (1, 77, 32) # monkey patch text_encoder_lora_params = pipe._modify_text_encoder(pipe.text_encoder, pipe.lora_scale) text_encoder_lora_params = set_lora_weights( text_encoder_lora_state_dict(pipe.text_encoder), randn_weight=False ) with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=None, text_encoder_lora_layers=text_encoder_lora_params, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) pipe.load_lora_weights(tmpdirname) # inference with lora outputs_with_lora = pipe.text_encoder(**dummy_tokens)[0] assert outputs_with_lora.shape == (1, 77, 32) assert torch.allclose( outputs_without_lora, outputs_with_lora ), "lora_up_weight are all zero, so the lora outputs should be the same to without lora outputs" # monkey patch pipeline_components, _ = self.get_dummy_components() pipe = StableDiffusionPipeline(**pipeline_components) text_encoder_lora_params = pipe._modify_text_encoder(pipe.text_encoder, pipe.lora_scale) text_encoder_lora_params = set_lora_weights( text_encoder_lora_state_dict(pipe.text_encoder), randn_weight=True, var=0.1 ) with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=None, text_encoder_lora_layers=text_encoder_lora_params, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) pipe.load_lora_weights(tmpdirname) # inference with lora outputs_with_lora = pipe.text_encoder(**dummy_tokens)[0] assert outputs_with_lora.shape == (1, 77, 32) assert not torch.allclose( outputs_without_lora, outputs_with_lora ), "lora_up_weight are not zero, so the lora outputs should be different to without lora outputs" def test_text_encoder_lora_remove_monkey_patch(self): pipeline_components, _ = self.get_dummy_components() pipe = StableDiffusionPipeline(**pipeline_components) dummy_tokens = self.get_dummy_tokens() # inference without lora outputs_without_lora = pipe.text_encoder(**dummy_tokens)[0] assert outputs_without_lora.shape == (1, 77, 32) # monkey patch params = pipe._modify_text_encoder(pipe.text_encoder, pipe.lora_scale) params = set_lora_weights(text_encoder_lora_state_dict(pipe.text_encoder), var=0.1, randn_weight=True) with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=None, text_encoder_lora_layers=params, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) pipe.load_lora_weights(tmpdirname) # inference with lora outputs_with_lora = pipe.text_encoder(**dummy_tokens)[0] assert outputs_with_lora.shape == (1, 77, 32) assert not torch.allclose( outputs_without_lora, outputs_with_lora ), "lora outputs should be different to without lora outputs" # remove monkey patch pipe._remove_text_encoder_monkey_patch() # inference with removed lora outputs_without_lora_removed = pipe.text_encoder(**dummy_tokens)[0] assert outputs_without_lora_removed.shape == (1, 77, 32) assert torch.allclose( outputs_without_lora, 
outputs_without_lora_removed ), "remove lora monkey patch should restore the original outputs" def test_text_encoder_lora_scale(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs() with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs).images lora_image_slice = lora_images[0, -3:, -3:, -1] lora_images_with_scale = sd_pipe(**pipeline_inputs, cross_attention_kwargs={"scale": 0.5}).images lora_image_with_scale_slice = lora_images_with_scale[0, -3:, -3:, -1] # Outputs shouldn't match. self.assertFalse( torch.allclose(torch.from_numpy(lora_image_slice), torch.from_numpy(lora_image_with_scale_slice)) ) def test_lora_unet_attn_processors(self): with tempfile.TemporaryDirectory() as tmpdirname: self.create_lora_weight_file(tmpdirname) pipeline_components, _ = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # check if vanilla attention processors are used for _, module in sd_pipe.unet.named_modules(): if isinstance(module, Attention): self.assertIsInstance(module.processor, (AttnProcessor, AttnProcessor2_0)) # load LoRA weight file sd_pipe.load_lora_weights(tmpdirname) # check if lora attention processors are used for _, module in sd_pipe.unet.named_modules(): if isinstance(module, Attention): self.assertIsNotNone(module.to_q.lora_layer) self.assertIsNotNone(module.to_k.lora_layer) self.assertIsNotNone(module.to_v.lora_layer) self.assertIsNotNone(module.to_out[0].lora_layer) def test_unload_lora_sd(self): pipeline_components, lora_components = self.get_dummy_components() _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe.unet.set_default_attn_processor() original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Unload LoRA parameters. sd_pipe.unload_lora_weights() original_images_two = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice_two = original_images_two[0, -3:, -3:, -1] assert not np.allclose( orig_image_slice, lora_image_slice ), "LoRA parameters should lead to a different image slice." assert not np.allclose( orig_image_slice_two, lora_image_slice ), "LoRA parameters should lead to a different image slice." 
assert np.allclose( orig_image_slice, orig_image_slice_two, atol=1e-3 ), "Unloading LoRA parameters should lead to results similar to what was obtained with the pipeline without any LoRA parameters." @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), "This test is supposed to run on GPU") def test_lora_unet_attn_processors_with_xformers(self): with tempfile.TemporaryDirectory() as tmpdirname: self.create_lora_weight_file(tmpdirname) pipeline_components, _ = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # enable XFormers sd_pipe.enable_xformers_memory_efficient_attention() # check if xFormers attention processors are used for _, module in sd_pipe.unet.named_modules(): if isinstance(module, Attention): self.assertIsInstance(module.processor, XFormersAttnProcessor) # load LoRA weight file sd_pipe.load_lora_weights(tmpdirname) # check if lora attention processors are used for _, module in sd_pipe.unet.named_modules(): if isinstance(module, Attention): self.assertIsNotNone(module.to_q.lora_layer) self.assertIsNotNone(module.to_k.lora_layer) self.assertIsNotNone(module.to_v.lora_layer) self.assertIsNotNone(module.to_out[0].lora_layer) # unload lora weights sd_pipe.unload_lora_weights() # check if attention processors are reverted back to xFormers for _, module in sd_pipe.unet.named_modules(): if isinstance(module, Attention): self.assertIsInstance(module.processor, XFormersAttnProcessor) @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), "This test is supposed to run on GPU") def test_lora_save_load_with_xformers(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs() # enable XFormers sd_pipe.enable_xformers_memory_efficient_attention() original_images = sd_pipe(**pipeline_inputs).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: LoraLoaderMixin.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_layers"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Outputs shouldn't match. 
self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice))) @deprecate_after_peft_backend class SDInpaintLoraMixinTests(unittest.TestCase): lora_rank = 4 def get_dummy_inputs(self, device, seed=0, img_res=64, output_pil=True): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched if output_pil: # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB").resize((img_res, img_res)) else: # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) # Convert image to [-1, 1] init_image = 2.0 * image - 1.0 mask_image = torch.ones((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") unet_lora_raw_params, unet_lora_params = create_unet_lora_layers(unet, rank=self.lora_rank) text_encoder_lora_params = StableDiffusionXLLoraLoaderMixin._modify_text_encoder( text_encoder, dtype=torch.float32, rank=self.lora_rank ) text_encoder_lora_params = set_lora_weights( text_encoder_lora_state_dict(text_encoder), randn_weight=True, var=0.1 ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } lora_components = { "unet_lora_raw_params": unet_lora_raw_params, "unet_lora_params": unet_lora_params, "text_encoder_lora_params": text_encoder_lora_params, } return components, lora_components def test_stable_diffusion_inpaint_lora(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) 
sd_pipe.unet.set_default_attn_processor() # forward 1 inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] # set lora layers sd_pipe.unet.load_attn_procs(lora_components["unet_lora_params"]) # forward 2 inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.0}) image = output.images image_slice_1 = image[0, -3:, -3:, -1] # forward 3 inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.5}) image = output.images image_slice_2 = image[0, -3:, -3:, -1] assert np.abs(image_slice - image_slice_1).max() < 1e-2 assert np.abs(image_slice - image_slice_2).max() > 1e-2 @deprecate_after_peft_backend class SDXLLoraLoaderMixinTests(unittest.TestCase): lora_rank = 4 def get_dummy_components(self, modify_text_encoder=True): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") _, unet_lora_params = create_unet_lora_layers(unet, rank=self.lora_rank) if modify_text_encoder: _ = StableDiffusionXLLoraLoaderMixin._modify_text_encoder( text_encoder, dtype=torch.float32, rank=self.lora_rank ) text_encoder_lora_params = text_encoder_lora_state_dict(text_encoder) StableDiffusionXLLoraLoaderMixin._remove_text_encoder_monkey_patch_classmethod(text_encoder) _ = StableDiffusionXLLoraLoaderMixin._modify_text_encoder( text_encoder_2, dtype=torch.float32, rank=self.lora_rank ) text_encoder_two_lora_params = text_encoder_lora_state_dict(text_encoder_2) StableDiffusionXLLoraLoaderMixin._remove_text_encoder_monkey_patch_classmethod(text_encoder_2) else: text_encoder_lora_params = None text_encoder_two_lora_params = None pipeline_components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } lora_components = { "unet_lora_params": unet_lora_params, "text_encoder_lora_params": text_encoder_lora_params, "text_encoder_two_lora_params": text_encoder_two_lora_params, } return 
pipeline_components, lora_components def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 10 num_channels = 4 sizes = (32, 32) generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "A painting of a squirrel eating a burger", "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs def test_lora_save_load(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs() original_images = sd_pipe(**pipeline_inputs).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Outputs shouldn't match. self.assertFalse(torch.allclose(torch.from_numpy(orig_image_slice), torch.from_numpy(lora_image_slice))) def test_unload_lora_sdxl(self): pipeline_components, lora_components = self.get_dummy_components() _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe.unet.set_default_attn_processor() original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(tmpdirname) lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Unload LoRA parameters. sd_pipe.unload_lora_weights() original_images_two = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice_two = original_images_two[0, -3:, -3:, -1] assert not np.allclose( orig_image_slice, lora_image_slice ), "LoRA parameters should lead to a different image slice." assert not np.allclose( orig_image_slice_two, lora_image_slice ), "LoRA parameters should lead to a different image slice." assert np.allclose( orig_image_slice, orig_image_slice_two, atol=1e-3 ), "Unloading LoRA parameters should lead to results similar to what was obtained with the pipeline without any LoRA parameters." 
def test_load_lora_locally(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=False, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin")) sd_pipe.unload_lora_weights() def test_text_encoder_lora_state_dict_unchanged(self): pipeline_components, lora_components = self.get_dummy_components(modify_text_encoder=False) sd_pipe = StableDiffusionXLPipeline(**pipeline_components) text_encoder_1_sd_keys = sorted(sd_pipe.text_encoder.state_dict().keys()) text_encoder_2_sd_keys = sorted(sd_pipe.text_encoder_2.state_dict().keys()) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # Modify the text encoder. _ = StableDiffusionXLLoraLoaderMixin._modify_text_encoder( sd_pipe.text_encoder, dtype=torch.float32, rank=self.lora_rank ) lora_components["text_encoder_lora_params"] = set_lora_weights( text_encoder_lora_state_dict(sd_pipe.text_encoder), randn_weight=True, var=0.1 ) _ = StableDiffusionXLLoraLoaderMixin._modify_text_encoder( sd_pipe.text_encoder_2, dtype=torch.float32, rank=self.lora_rank ) lora_components["text_encoder_two_lora_params"] = set_lora_weights( text_encoder_lora_state_dict(sd_pipe.text_encoder_2), randn_weight=True, var=0.1 ) with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=False, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin")) text_encoder_1_sd_keys_2 = sorted(sd_pipe.text_encoder.state_dict().keys()) text_encoder_2_sd_keys_2 = sorted(sd_pipe.text_encoder_2.state_dict().keys()) sd_pipe.unload_lora_weights() text_encoder_1_sd_keys_3 = sorted(sd_pipe.text_encoder.state_dict().keys()) text_encoder_2_sd_keys_3 = sorted(sd_pipe.text_encoder_2.state_dict().keys()) # default & unloaded LoRA weights should have identical state_dicts assert text_encoder_1_sd_keys == text_encoder_1_sd_keys_3 # default & loaded LoRA weights should NOT have identical state_dicts assert text_encoder_1_sd_keys != text_encoder_1_sd_keys_2 # default & unloaded LoRA weights should have identical state_dicts assert text_encoder_2_sd_keys == text_encoder_2_sd_keys_3 # default & loaded LoRA weights should NOT have identical state_dicts assert text_encoder_2_sd_keys != text_encoder_2_sd_keys_2 def test_load_lora_locally_safetensors(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, 
unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) sd_pipe.unload_lora_weights() def test_lora_fuse_nan(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) # corrupt one LoRA weight with `inf` values with torch.no_grad(): sd_pipe.unet.mid_block.attentions[0].transformer_blocks[0].attn1.to_q.lora_layer.down.weight += float( "NaN" ) # with `safe_fusing=True` we should see an Error with self.assertRaises(ValueError): sd_pipe.fuse_lora(safe_fusing=True) # without we should not see an error, but every image will be black sd_pipe.fuse_lora(safe_fusing=False) out = sd_pipe("test", num_inference_steps=2, output_type="np").images assert np.isnan(out).all() def test_lora_fusion(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) sd_pipe.fuse_lora() lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice = lora_images[0, -3:, -3:, -1] self.assertFalse(np.allclose(orig_image_slice, lora_image_slice, atol=1e-3)) def test_unfuse_lora(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.unet.set_default_attn_processor() _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, 
unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) sd_pipe.fuse_lora() lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Reverse LoRA fusion. sd_pipe.unfuse_lora() original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images orig_image_slice_two = original_images[0, -3:, -3:, -1] assert not np.allclose( orig_image_slice, lora_image_slice ), "Fusion of LoRAs should lead to a different image slice." assert not np.allclose( orig_image_slice_two, lora_image_slice ), "Fusion of LoRAs should lead to a different image slice." assert np.allclose( orig_image_slice, orig_image_slice_two, atol=1e-3 ), "Reversing LoRA fusion should lead to results similar to what was obtained with the pipeline without any LoRA parameters." def test_lora_fusion_is_not_affected_by_unloading(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) _ = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) sd_pipe.fuse_lora() lora_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice = lora_images[0, -3:, -3:, -1] # Unload LoRA parameters. sd_pipe.unload_lora_weights() images_with_unloaded_lora = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images images_with_unloaded_lora_slice = images_with_unloaded_lora[0, -3:, -3:, -1] assert ( np.abs(lora_image_slice - images_with_unloaded_lora_slice).max() < 2e-1 ), "`unload_lora_weights()` should have not effect on the semantics of the results as the LoRA parameters were fused." 
def test_fuse_lora_with_different_scales(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) _ = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) sd_pipe.fuse_lora(lora_scale=1.0) lora_images_scale_one = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice_scale_one = lora_images_scale_one[0, -3:, -3:, -1] # Reverse LoRA fusion. sd_pipe.unfuse_lora() with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) sd_pipe.fuse_lora(lora_scale=0.5) lora_images_scale_0_5 = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice_scale_0_5 = lora_images_scale_0_5[0, -3:, -3:, -1] assert not np.allclose( lora_image_slice_scale_one, lora_image_slice_scale_0_5, atol=1e-03 ), "Different LoRA scales should influence the outputs accordingly." 
def test_with_different_scales(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.unet.set_default_attn_processor() _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) original_images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images original_imagee_slice = original_images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) lora_images_scale_one = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice_scale_one = lora_images_scale_one[0, -3:, -3:, -1] lora_images_scale_0_5 = sd_pipe( **pipeline_inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.5} ).images lora_image_slice_scale_0_5 = lora_images_scale_0_5[0, -3:, -3:, -1] lora_images_scale_0_0 = sd_pipe( **pipeline_inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.0} ).images lora_image_slice_scale_0_0 = lora_images_scale_0_0[0, -3:, -3:, -1] assert not np.allclose( lora_image_slice_scale_one, lora_image_slice_scale_0_5, atol=1e-03 ), "Different LoRA scales should influence the outputs accordingly." assert np.allclose( original_imagee_slice, lora_image_slice_scale_0_0, atol=1e-03 ), "LoRA scale of 0.0 shouldn't be different from the results without LoRA." def test_with_different_scales_fusion_equivalence(self): pipeline_components, lora_components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**pipeline_components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.unet.set_default_attn_processor() _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False) images = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images images_slice = images[0, -3:, -3:, -1] with tempfile.TemporaryDirectory() as tmpdirname: StableDiffusionXLPipeline.save_lora_weights( save_directory=tmpdirname, unet_lora_layers=lora_components["unet_lora_params"], text_encoder_lora_layers=lora_components["text_encoder_lora_params"], text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"], safe_serialization=True, ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) lora_images_scale_0_5 = sd_pipe( **pipeline_inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.5} ).images lora_image_slice_scale_0_5 = lora_images_scale_0_5[0, -3:, -3:, -1] sd_pipe.fuse_lora(lora_scale=0.5) lora_images_scale_0_5_fusion = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images lora_image_slice_scale_0_5_fusion = lora_images_scale_0_5_fusion[0, -3:, -3:, -1] assert np.allclose( lora_image_slice_scale_0_5, lora_image_slice_scale_0_5_fusion, atol=1e-03 ), "Fusion shouldn't affect the results when calling the pipeline with a non-default LoRA scale." 
        sd_pipe.unfuse_lora()
        images_unfused = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        images_slice_unfused = images_unfused[0, -3:, -3:, -1]

        assert np.allclose(
            images_slice, images_slice_unfused, atol=1e-03
        ), "Unfusing the LoRA should restore the outputs obtained without any LoRA."
        assert not np.allclose(
            images_slice, lora_image_slice_scale_0_5, atol=1e-03
        ), "Outputs with a LoRA scale of 0.5 and outputs without any LoRA shouldn't match."

    def test_save_load_fused_lora_modules(self):
        pipeline_components, lora_components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLPipeline(**pipeline_components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        _, _, pipeline_inputs = self.get_dummy_inputs(with_generator=False)

        with tempfile.TemporaryDirectory() as tmpdirname:
            StableDiffusionXLPipeline.save_lora_weights(
                save_directory=tmpdirname,
                unet_lora_layers=lora_components["unet_lora_params"],
                text_encoder_lora_layers=lora_components["text_encoder_lora_params"],
                text_encoder_2_lora_layers=lora_components["text_encoder_two_lora_params"],
                safe_serialization=True,
            )

            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
            sd_pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))

        sd_pipe.fuse_lora()
        lora_images_fusion = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
        lora_image_slice_fusion = lora_images_fusion[0, -3:, -3:, -1]

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd_pipe.save_pretrained(tmpdirname)
            sd_pipe_loaded = StableDiffusionXLPipeline.from_pretrained(tmpdirname).to(torch_device)

        loaded_lora_images = sd_pipe_loaded(**pipeline_inputs, generator=torch.manual_seed(0)).images
        loaded_lora_image_slice = loaded_lora_images[0, -3:, -3:, -1]

        assert np.allclose(
            lora_image_slice_fusion, loaded_lora_image_slice, atol=1e-03
        ), "The pipeline was serialized with the LoRA parameters fused into the respective modules, so the loaded pipeline should yield the same outputs."
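

# Illustrative sketch (not used by the tests above and not the diffusers implementation; every name
# below is hypothetical): the fuse/unfuse round-trip exercised by `test_unfuse_lora` and
# `test_with_different_scales_fusion_equivalence` boils down to adding and then subtracting
# `scale * (up @ down)` on a frozen base weight.
def _lora_fuse_unfuse_sketch():
    import torch

    base = torch.nn.Linear(8, 8, bias=False)
    original_weight = base.weight.data.clone()

    rank, scale = 4, 0.5
    down = torch.randn(rank, 8) * 0.1  # LoRA "A" (down-projection) matrix
    up = torch.randn(8, rank) * 0.1  # LoRA "B" (up-projection) matrix
    delta = up @ down  # low-rank weight update, same shape as the base weight

    # Fuse: bake the scaled update into the base weight.
    base.weight.data += scale * delta
    # Unfuse: subtract the same quantity to restore the original weight.
    base.weight.data -= scale * delta

    assert torch.allclose(base.weight.data, original_weight, atol=1e-6)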
@deprecate_after_peft_backend class UNet2DConditionLoRAModelTests(unittest.TestCase): model_class = UNet2DConditionModel main_input_name = "sample" lora_rank = 4 @property def dummy_input(self): batch_size = 4 num_channels = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 32), rng=random.Random(0)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"), "cross_attention_dim": 32, "attention_head_dim": 8, "out_channels": 4, "in_channels": 4, "layers_per_block": 2, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_lora_at_different_scales(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample1 = model(**inputs_dict).sample _, lora_params = create_unet_lora_layers(model, rank=self.lora_rank) # make sure we can set a list of attention processors model.load_attn_procs(lora_params) model.to(torch_device) with torch.no_grad(): sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample assert (sample1 - sample2).abs().max() < 3e-3 assert (sample3 - sample4).abs().max() < 3e-3 # sample 2 and sample 3 should be different assert (sample2 - sample3).abs().max() > 1e-4 def test_lora_on_off(self, expected_max_diff=1e-3): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): old_sample = model(**inputs_dict).sample _, lora_params = create_unet_lora_layers(model, rank=self.lora_rank) model.load_attn_procs(lora_params) with torch.no_grad(): sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample # Unload LoRA. 
model.unload_lora() with torch.no_grad(): new_sample = model(**inputs_dict).sample max_diff_new_sample = (sample - new_sample).abs().max() max_diff_old_sample = (sample - old_sample).abs().max() assert max_diff_new_sample < expected_max_diff assert max_diff_old_sample < expected_max_diff @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_lora_xformers_on_off(self, expected_max_diff=6e-4): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) _, lora_params = create_unet_lora_layers(model, rank=self.lora_rank) model.load_attn_procs(lora_params) # default with torch.no_grad(): sample = model(**inputs_dict).sample model.enable_xformers_memory_efficient_attention() on_sample = model(**inputs_dict).sample model.disable_xformers_memory_efficient_attention() off_sample = model(**inputs_dict).sample max_diff_on_sample = (sample - on_sample).abs().max() max_diff_off_sample = (sample - off_sample).abs().max() assert max_diff_on_sample < expected_max_diff assert max_diff_off_sample < expected_max_diff @deprecate_after_peft_backend class UNet3DConditionLoRAModelTests(unittest.TestCase): model_class = UNet3DConditionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 num_frames = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels, num_frames) + sizes, rng=random.Random(0)).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 32), rng=random.Random(0)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 4, 32, 32) @property def output_shape(self): return (4, 4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ( "CrossAttnDownBlock3D", "DownBlock3D", ), "up_block_types": ("UpBlock3D", "CrossAttnUpBlock3D"), "cross_attention_dim": 32, "attention_head_dim": 8, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_lora_at_different_scales(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = 8 model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample1 = model(**inputs_dict).sample unet_lora_params = create_3d_unet_lora_layers(model) # make sure we can set a list of attention processors model.load_attn_procs(unet_lora_params) model.to(torch_device) with torch.no_grad(): sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample assert (sample1 - sample2).abs().max() < 3e-3 assert (sample3 - sample4).abs().max() < 3e-3 # sample 2 and sample 3 should be different assert (sample2 - sample3).abs().max() > 3e-3 @slow @deprecate_after_peft_backend @require_torch_gpu class LoraIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dreambooth_old_format(self): generator = 
torch.Generator("cpu").manual_seed(0) lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example" card = RepoCard.load(lora_model_id) base_model_id = card.data.to_dict()["base_model"] pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None) pipe = pipe.to(torch_device) pipe.load_lora_weights(lora_model_id) images = pipe( "A photo of a sks dog floating in the river", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.7207, 0.6787, 0.6010, 0.7478, 0.6838, 0.6064, 0.6984, 0.6443, 0.5785]) self.assertTrue(np.allclose(images, expected, atol=1e-4)) def test_dreambooth_text_encoder_new_format(self): generator = torch.Generator().manual_seed(0) lora_model_id = "hf-internal-testing/lora-trained" card = RepoCard.load(lora_model_id) base_model_id = card.data.to_dict()["base_model"] pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None) pipe = pipe.to(torch_device) pipe.load_lora_weights(lora_model_id) images = pipe("A photo of a sks dog", output_type="np", generator=generator, num_inference_steps=2).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.6628, 0.6138, 0.5390, 0.6625, 0.6130, 0.5463, 0.6166, 0.5788, 0.5359]) self.assertTrue(np.allclose(images, expected, atol=1e-4)) def test_a1111(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None).to( torch_device ) lora_model_id = "hf-internal-testing/civitai-light-shadow-lora" lora_filename = "light_and_shadow.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_lycoris(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/Amixx", safety_checker=None, use_safetensors=True, variant="fp16" ).to(torch_device) lora_model_id = "hf-internal-testing/edgLycorisMugler-light" lora_filename = "edgLycorisMugler-light.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.6463, 0.658, 0.599, 0.6542, 0.6512, 0.6213, 0.658, 0.6485, 0.6017]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_a1111_with_model_cpu_offload(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None) pipe.enable_model_cpu_offload() lora_model_id = "hf-internal-testing/civitai-light-shadow-lora" lora_filename = "light_and_shadow.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_a1111_with_sequential_cpu_offload(self): generator = 
torch.Generator().manual_seed(0) pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None) pipe.enable_sequential_cpu_offload() lora_model_id = "hf-internal-testing/civitai-light-shadow-lora" lora_filename = "light_and_shadow.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_kohya_sd_v15_with_higher_dimensions(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to( torch_device ) lora_model_id = "hf-internal-testing/urushisato-lora" lora_filename = "urushisato_v15.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.7165, 0.6616, 0.5833, 0.7504, 0.6718, 0.587, 0.6871, 0.6361, 0.5694]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_vanilla_funetuning(self): generator = torch.Generator().manual_seed(0) lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4" card = RepoCard.load(lora_model_id) base_model_id = card.data.to_dict()["base_model"] pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None) pipe = pipe.to(torch_device) pipe.load_lora_weights(lora_model_id) images = pipe("A pokemon with blue eyes.", output_type="np", generator=generator, num_inference_steps=2).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.7406, 0.699, 0.5963, 0.7493, 0.7045, 0.6096, 0.6886, 0.6388, 0.583]) self.assertTrue(np.allclose(images, expected, atol=1e-4)) def test_unload_kohya_lora(self): generator = torch.manual_seed(0) prompt = "masterpiece, best quality, mountain" num_inference_steps = 2 pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to( torch_device ) initial_images = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images initial_images = initial_images[0, -3:, -3:, -1].flatten() lora_model_id = "hf-internal-testing/civitai-colored-icons-lora" lora_filename = "Colored_Icons_by_vizsumit.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) generator = torch.manual_seed(0) lora_images = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images lora_images = lora_images[0, -3:, -3:, -1].flatten() pipe.unload_lora_weights() generator = torch.manual_seed(0) unloaded_lora_images = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten() self.assertFalse(np.allclose(initial_images, lora_images)) self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3)) def test_load_unload_load_kohya_lora(self): # This test ensures that a Kohya-style LoRA can be safely unloaded and then loaded # without introducing any side-effects. Even though the test uses a Kohya-style # LoRA, the underlying adapter handling mechanism is format-agnostic. 
generator = torch.manual_seed(0) prompt = "masterpiece, best quality, mountain" num_inference_steps = 2 pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to( torch_device ) initial_images = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images initial_images = initial_images[0, -3:, -3:, -1].flatten() lora_model_id = "hf-internal-testing/civitai-colored-icons-lora" lora_filename = "Colored_Icons_by_vizsumit.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) generator = torch.manual_seed(0) lora_images = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images lora_images = lora_images[0, -3:, -3:, -1].flatten() pipe.unload_lora_weights() generator = torch.manual_seed(0) unloaded_lora_images = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten() self.assertFalse(np.allclose(initial_images, lora_images)) self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3)) # make sure we can load a LoRA again after unloading and they don't have # any undesired effects. pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) generator = torch.manual_seed(0) lora_images_again = pipe( prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps ).images lora_images_again = lora_images_again[0, -3:, -3:, -1].flatten() self.assertTrue(np.allclose(lora_images, lora_images_again, atol=1e-3)) def test_sdxl_0_9_lora_one(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9") lora_model_id = "hf-internal-testing/sdxl-0.9-daiton-lora" lora_filename = "daiton-xl-lora-test.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.3838, 0.3482, 0.3588, 0.3162, 0.319, 0.3369, 0.338, 0.3366, 0.3213]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_sdxl_0_9_lora_two(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9") lora_model_id = "hf-internal-testing/sdxl-0.9-costumes-lora" lora_filename = "saijo.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.3137, 0.3269, 0.3355, 0.255, 0.2577, 0.2563, 0.2679, 0.2758, 0.2626]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_sdxl_0_9_lora_three(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9") lora_model_id = "hf-internal-testing/sdxl-0.9-kamepan-lora" lora_filename = "kame_sdxl_v2-000020-16rank.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() 
expected = np.array([0.4015, 0.3761, 0.3616, 0.3745, 0.3462, 0.3337, 0.3564, 0.3649, 0.3468]) self.assertTrue(np.allclose(images, expected, atol=5e-3)) def test_sdxl_1_0_lora(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535]) self.assertTrue(np.allclose(images, expected, atol=1e-4)) def test_sdxl_1_0_lora_fusion(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.fuse_lora() pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() # This way we also test equivalence between LoRA fusion and the non-fusion behaviour. expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535]) self.assertTrue(np.allclose(images, expected, atol=1e-4)) def test_sdxl_1_0_lora_unfusion(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.fuse_lora() pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images_with_fusion = images[0, -3:, -3:, -1].flatten() pipe.unfuse_lora() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images_without_fusion = images[0, -3:, -3:, -1].flatten() self.assertFalse(np.allclose(images_with_fusion, images_without_fusion, atol=1e-3)) def test_sdxl_1_0_lora_unfusion_effectivity(self): pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images original_image_slice = images[0, -3:, -3:, -1].flatten() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.fuse_lora() generator = torch.Generator().manual_seed(0) _ = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images pipe.unfuse_lora() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images_without_fusion_slice = images[0, -3:, -3:, -1].flatten() 
self.assertTrue(np.allclose(original_image_slice, images_without_fusion_slice, atol=1e-3)) def test_sdxl_1_0_lora_fusion_efficiency(self): generator = torch.Generator().manual_seed(0) lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16) pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() start_time = time.time() for _ in range(3): pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images end_time = time.time() elapsed_time_non_fusion = end_time - start_time del pipe pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16) pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16) pipe.fuse_lora() pipe.enable_model_cpu_offload() generator = torch.Generator().manual_seed(0) start_time = time.time() for _ in range(3): pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images end_time = time.time() elapsed_time_fusion = end_time - start_time self.assertTrue(elapsed_time_fusion < elapsed_time_non_fusion) def test_sdxl_1_0_last_ben(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() lora_model_id = "TheLastBen/Papercut_SDXL" lora_filename = "papercut.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe("papercut.safetensors", output_type="np", generator=generator, num_inference_steps=2).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.5244, 0.4347, 0.4312, 0.4246, 0.4398, 0.4409, 0.4884, 0.4938, 0.4094]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_sdxl_1_0_fuse_unfuse_all(self): pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16) text_encoder_1_sd = copy.deepcopy(pipe.text_encoder.state_dict()) text_encoder_2_sd = copy.deepcopy(pipe.text_encoder_2.state_dict()) unet_sd = copy.deepcopy(pipe.unet.state_dict()) pipe.load_lora_weights( "davizca87/sun-flower", weight_name="snfw3rXL-000004.safetensors", torch_dtype=torch.float16 ) pipe.fuse_lora() pipe.unload_lora_weights() pipe.unfuse_lora() assert state_dicts_almost_equal(text_encoder_1_sd, pipe.text_encoder.state_dict()) assert state_dicts_almost_equal(text_encoder_2_sd, pipe.text_encoder_2.state_dict()) assert state_dicts_almost_equal(unet_sd, pipe.unet.state_dict()) def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self): generator = torch.Generator().manual_seed(0) pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_sequential_cpu_offload() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535]) self.assertTrue(np.allclose(images, expected, atol=1e-3)) def test_canny_lora(self): controlnet = 
ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors") pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) prompt = "corgi" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4574, 0.4461, 0.4435, 0.4462, 0.4396, 0.439, 0.4474, 0.4486, 0.4333]) assert np.allclose(original_image, expected_image, atol=1e-04) @nightly def test_sequential_fuse_unfuse(self): pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") # 1. round pipe.load_lora_weights("Pclanglais/TintinIA") pipe.fuse_lora() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images image_slice = images[0, -3:, -3:, -1].flatten() pipe.unfuse_lora() # 2. round pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style") pipe.fuse_lora() pipe.unfuse_lora() # 3. round pipe.load_lora_weights("ostris/crayon_style_lora_sdxl") pipe.fuse_lora() pipe.unfuse_lora() # 4. back to 1st round pipe.load_lora_weights("Pclanglais/TintinIA") pipe.fuse_lora() generator = torch.Generator().manual_seed(0) images_2 = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images image_slice_2 = images_2[0, -3:, -3:, -1].flatten() self.assertTrue(np.allclose(image_slice, image_slice_2, atol=1e-3))
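

# Illustrative sketch (hypothetical, not the diffusers attention-processor code): the runtime
# scaling asserted above, e.g. in `test_with_different_scales` and `test_lora_at_different_scales`,
# amounts to adding the LoRA branch on top of the frozen base layer multiplied by `scale`, so
# `scale=0.0` reproduces the base output while non-zero scales change it.
class _ScaledLoRALinearSketch(torch.nn.Module):
    def __init__(self, in_features=8, out_features=8, rank=4):
        super().__init__()
        self.base = torch.nn.Linear(in_features, out_features)
        self.down = torch.nn.Linear(in_features, rank, bias=False)
        self.up = torch.nn.Linear(rank, out_features, bias=False)

    def forward(self, hidden_states, scale=1.0):
        return self.base(hidden_states) + scale * self.up(self.down(hidden_states))


def _demo_scale_zero_matches_base():
    # With `scale=0.0` the LoRA branch contributes nothing, mirroring the `cross_attention_kwargs`
    # behaviour checked above.
    layer = _ScaledLoRALinearSketch()
    sample = torch.randn(2, 8)
    assert torch.allclose(layer(sample, scale=0.0), layer.base(sample))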
diffusers/tests/lora/test_lora_layers_old_backend.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_old_backend.py", "repo_id": "diffusers", "token_count": 44656 }
129
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import UNet1DModel from diffusers.utils.testing_utils import ( backend_manual_seed, floats_tensor, slow, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin class UNet1DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet1DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_features = 14 seq_len = 16 noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) time_step = torch.tensor([10] * batch_size).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (4, 14, 16) @property def output_shape(self): return (4, 14, 16) def test_ema_training(self): pass def test_training(self): pass def test_determinism(self): super().test_determinism() def test_outputs_equivalence(self): super().test_outputs_equivalence() def test_from_save_pretrained(self): super().test_from_save_pretrained() def test_from_save_pretrained_variant(self): super().test_from_save_pretrained_variant() def test_model_from_pretrained(self): super().test_model_from_pretrained() def test_output(self): super().test_output() def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64, 128, 256), "in_channels": 14, "out_channels": 14, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1.0, "out_block_type": "OutConv1DBlock", "mid_block_type": "MidResTemporalBlock1D", "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D"), "act_fn": "swish", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_features = model.config.in_channels seq_len = 16 noise = torch.randn((1, seq_len, num_features)).permute( 0, 2, 1 ) # match original, we can update values and remove time_step = torch.full((num_features,), 0) with torch.no_grad(): output = model(noise, time_step).sample.permute(0, 2, 1) output_slice = output[0, -3:, -3:].flatten() # fmt: off expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, 
rtol=1e-3)) def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass @slow def test_unet_1d_maestro(self): model_id = "harmonai/maestro-150k" model = UNet1DModel.from_pretrained(model_id, subfolder="unet") model.to(torch_device) sample_size = 65536 noise = torch.sin(torch.arange(sample_size)[None, None, :].repeat(1, 2, 1)).to(torch_device) timestep = torch.tensor([1]).to(torch_device) with torch.no_grad(): output = model(noise, timestep).sample output_sum = output.abs().sum() output_max = output.abs().max() assert (output_sum - 224.0896).abs() < 0.5 assert (output_max - 0.0607).abs() < 4e-4 class UNetRLModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet1DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_features = 14 seq_len = 16 noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) time_step = torch.tensor([10] * batch_size).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (4, 14, 16) @property def output_shape(self): return (4, 14, 1) def test_determinism(self): super().test_determinism() def test_outputs_equivalence(self): super().test_outputs_equivalence() def test_from_save_pretrained(self): super().test_from_save_pretrained() def test_from_save_pretrained_variant(self): super().test_from_save_pretrained_variant() def test_model_from_pretrained(self): super().test_model_from_pretrained() def test_output(self): # UNetRL is a value-function is different output shape init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = torch.Size((inputs_dict["sample"].shape[0], 1)) self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_ema_training(self): pass def test_training(self): pass def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 14, "out_channels": 14, "down_block_types": ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], "up_block_types": [], "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": [32, 64, 128, 256], "layers_per_block": 1, "downsample_each_block": True, "use_timestep_embedding": True, "freq_shift": 1.0, "flip_sin_to_cos": False, "time_embedding_type": "positional", "act_fn": "mish", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) self.assertIsNotNone(value_function) self.assertEqual(len(vf_loading_info["missing_keys"]), 0) value_function.to(torch_device) image = value_function(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_features = value_function.config.in_channels seq_len = 14 noise = torch.randn((1, seq_len, num_features)).permute( 0, 2, 1 ) # match original, we can update values and remove time_step = 
torch.full((num_features,), 0) with torch.no_grad(): output = value_function(noise, time_step).sample # fmt: off expected_output_slice = torch.tensor([165.25] * seq_len) # fmt: on self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass
diffusers/tests/models/unets/test_models_unet_1d.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_1d.py", "repo_id": "diffusers", "token_count": 3986 }
130
import pickle as pkl import unittest from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from diffusers.utils.outputs import BaseOutput from diffusers.utils.testing_utils import require_torch @dataclass class CustomOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] class ConfigTester(unittest.TestCase): def test_outputs_single_attribute(self): outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4)) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_dict_init(self): # test output reinitialization with a `dict` for compatibility with `accelerate` outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)}) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]}) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_serialization(self): outputs_orig = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) serialized = pkl.dumps(outputs_orig) outputs_copy = pkl.loads(serialized) # Check original and copy are equal assert dir(outputs_orig) == dir(outputs_copy) assert dict(outputs_orig) == dict(outputs_copy) assert vars(outputs_orig) == vars(outputs_copy) @require_torch def test_torch_pytree(self): # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) # this is important for DistributedDataParallel gradient synchronization with static_graph=True import torch import torch.utils._pytree data = np.random.rand(1, 3, 4, 4) x = CustomOutput(images=data) self.assertFalse(torch.utils._pytree._is_leaf(x)) expected_flat_outs = [data] expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ["images"], [torch.utils._pytree.LeafSpec()]) actual_flat_outs, actual_tree_spec = torch.utils._pytree.tree_flatten(x) self.assertEqual(expected_flat_outs, actual_flat_outs) self.assertEqual(expected_tree_spec, actual_tree_spec) unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) self.assertEqual(x, unflattened_x)
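

# Illustrative sketch (not part of the tests above; assumes a torch version whose
# `torch.utils._pytree.tree_map` round-trips registered nodes the way `test_torch_pytree` checks):
# because `BaseOutput` subclasses are registered as pytree nodes, tree utilities can transform every
# leaf while preserving the output class, which is what DDP with `static_graph=True` relies on.
def _pytree_tree_map_sketch():
    import torch.utils._pytree as pytree

    outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4))
    doubled = pytree.tree_map(lambda leaf: leaf * 2, outputs)

    assert isinstance(doubled, CustomOutput)
    assert np.allclose(doubled.images, outputs.images * 2)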
diffusers/tests/others/test_outputs.py/0
{ "file_path": "diffusers/tests/others/test_outputs.py", "repo_id": "diffusers", "token_count": 1506 }
131
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTokenizer from transformers.models.blip_2.configuration_blip_2 import Blip2Config from transformers.models.clip.configuration_clip import CLIPTextConfig from diffusers import AutoencoderKL, BlipDiffusionPipeline, PNDMScheduler, UNet2DConditionModel from diffusers.utils.testing_utils import enable_full_determinism from src.diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor from src.diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel from src.diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class BlipDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = BlipDiffusionPipeline params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", ] batch_params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "neg_prompt", "guidance_scale", "prompt_strength", "prompt_reps", ] def get_dummy_components(self): torch.manual_seed(0) text_encoder_config = CLIPTextConfig( vocab_size=1000, hidden_size=16, intermediate_size=16, projection_dim=16, num_hidden_layers=1, num_attention_heads=1, max_position_embeddings=77, ) text_encoder = ContextCLIPTextModel(text_encoder_config) vae = AutoencoderKL( in_channels=4, out_channels=4, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(32,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=16, sample_size=16, ) blip_vision_config = { "hidden_size": 16, "intermediate_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "image_size": 224, "patch_size": 14, "hidden_act": "quick_gelu", } blip_qformer_config = { "vocab_size": 1000, "hidden_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "intermediate_size": 16, "max_position_embeddings": 512, "cross_attention_frequency": 1, "encoder_hidden_size": 16, } qformer_config = Blip2Config( vision_config=blip_vision_config, qformer_config=blip_qformer_config, num_query_tokens=16, tokenizer="hf-internal-testing/tiny-random-bert", ) qformer = Blip2QFormerModel(qformer_config) unet = UNet2DConditionModel( block_out_channels=(16, 32), norm_num_groups=16, layers_per_block=1, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=16, ) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, skip_prk_steps=True, ) vae.eval() 
        qformer.eval()
        text_encoder.eval()

        image_processor = BlipImageProcessor()

        components = {
            "text_encoder": text_encoder,
            "vae": vae,
            "qformer": qformer,
            "unet": unet,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        np.random.seed(seed)
        reference_image = np.random.rand(32, 32, 3) * 255
        reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA")

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "swimming underwater",
            "generator": generator,
            "reference_image": reference_image,
            "source_subject_category": "dog",
            "target_subject_category": "dog",
            "height": 32,
            "width": 32,
            "guidance_scale": 7.5,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_blipdiffusion(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        image = pipe(**self.get_dummy_inputs(device))[0]
        image_slice = image[0, -3:, -3:, 0]

        assert image.shape == (1, 16, 16, 4)

        expected_slice = np.array([0.7096, 0.5900, 0.6703, 0.4032, 0.7766, 0.3629, 0.5447, 0.4149, 0.8172])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f"expected_slice {expected_slice}, but got {image_slice.flatten()}"
diffusers/tests/pipelines/blipdiffusion/test_blipdiffusion.py/0
{ "file_path": "diffusers/tests/pipelines/blipdiffusion/test_blipdiffusion.py", "repo_id": "diffusers", "token_count": 3066 }
132
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies from .test_kandinsky_img2img import Dummies as Img2ImgDummies from .test_kandinsky_inpaint import Dummies as InpaintDummies from .test_kandinsky_prior import Dummies as PriorDummies enable_full_determinism() class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyCombinedPipeline params = [ "prompt", ] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = True def get_dummy_components(self): dummy = Dummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update( { "height": 64, "width": 64, } ) return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert 
np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyImg2ImgCombinedPipeline params = ["prompt", "image"] batch_params = ["prompt", "negative_prompt", "image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = Img2ImgDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = Img2ImgDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) class 
KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyInpaintCombinedPipeline params = ["prompt", "image", "mask_image"] batch_params = ["prompt", "negative_prompt", "image", "mask_image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = InpaintDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = InpaintDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3)
diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py", "repo_id": "diffusers", "token_count": 5549 }
133
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@nightly
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diffusers/tests/pipelines/pndm/test_pndm.py/0
{ "file_path": "diffusers/tests/pipelines/pndm/test_pndm.py", "repo_id": "diffusers", "token_count": 1318 }
134
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, enable_full_determinism, load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_accelerator, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2PipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): generator_device = "cpu" if not device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=generator_device).manual_seed(seed) 
else: generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def 
test_stable_diffusion_unflawed(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDIMScheduler.from_config( components["scheduler"].config, timestep_spacing="trailing" ) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guidance_rescale"] = 0.7 inputs["num_inference_steps"] = 10 image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_long_prompt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) do_classifier_free_guidance = True negative_prompt = None num_images_per_prompt = 1 logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") logger.setLevel(logging.WARNING) prompt = 25 * "@" with CaptureLogger(logger) as cap_logger_3: text_embeddings_3, negeative_text_embeddings_3 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negeative_text_embeddings_3 is not None: text_embeddings_3 = torch.cat([negeative_text_embeddings_3, text_embeddings_3]) prompt = 100 * "@" with CaptureLogger(logger) as cap_logger: text_embeddings, negative_embeddings = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_embeddings is not None: text_embeddings = torch.cat([negative_embeddings, text_embeddings]) negative_prompt = "Hello" with CaptureLogger(logger) as cap_logger_2: text_embeddings_2, negative_text_embeddings_2 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_2 is not None: text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape assert text_embeddings.shape[1] == 77 assert cap_logger.out == cap_logger_2.out # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 assert cap_logger.out.count("@") == 25 assert cap_logger_3.out == "" def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_accelerator @skip_mps class StableDiffusion2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=_generator_device).manual_seed(seed) else: generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": 
latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_default_ddim(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_stable_diffusion_pndm(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_stable_diffusion_k_lms(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.10440, 0.13115, 0.11100, 0.10141, 0.11440, 0.07215, 0.11332, 0.09693, 0.10006]) assert np.abs(image_slice - expected_slice).max() < 3e-3 @require_torch_gpu def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe.unet.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # enable attention slicing pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 3.3 GB is allocated assert mem_bytes < 3.3 * 10**9 # disable slicing pipe.disable_attention_slicing() pipe.unet.set_default_attn_processor() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images # make sure that more than 3.3 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3.3 * 10**9 max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten()) assert max_diff < 5e-3 def test_stable_diffusion_text2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3862, -0.4507, -1.1729, 0.0686, -1.1045, 0.7124, -1.8301, 0.1903, 1.2773] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] 
expected_slice = np.array( [0.2720, -0.1863, -0.7383, -0.5029, -0.7534, 0.3970, -0.7646, 0.4468, 1.2686] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] @require_torch_gpu def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9 @require_torch_gpu def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) outputs = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) outputs_offloaded = pipe(**inputs) mem_bytes_offloaded = torch.cuda.max_memory_allocated() images = outputs.images images_offloaded = outputs_offloaded.images max_diff = numpy_cosine_similarity_distance(images.flatten(), images_offloaded.flatten()) assert max_diff < 1e-3 assert mem_bytes_offloaded < mem_bytes assert mem_bytes_offloaded < 3 * 10**9 for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") # With attention slicing torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_attention_slicing() _ = pipe(**inputs) mem_bytes_slicing = torch.cuda.max_memory_allocated() assert mem_bytes_slicing < mem_bytes_offloaded @nightly @require_torch_accelerator @skip_mps class StableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=_generator_device).manual_seed(seed) else: generator = torch.manual_seed(seed) latents = 
np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_2_0_default_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_0_base_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_2_1_default_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_euler(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_euler.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_dpm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config( sd_pipe.scheduler.config, final_sigmas_type="sigma_min" ) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 image = 
sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py", "repo_id": "diffusers", "token_count": 12007 }
135
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLInpaintPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionXLInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionXLInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( { "add_text_embeds", "add_time_ids", "mask", "masked_image_latents", } ) def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") 
text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": image_encoder, "feature_extractor": feature_extractor, "requires_aesthetics_score": True, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # create mask image[8:, 8:, :] = 255 mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "strength": 1.0, "output_type": "np", } return inputs def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def test_stable_diffusion_xl_inpaint_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice 
= image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8029, 0.5523, 0.5825, 0.6003, 0.6702, 0.7018, 0.6369, 0.5955, 0.5123]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_stable_diffusion_xl_inpaint_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = 
self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_refiner(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(skip_first_text_encoder=True) sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7045, 0.4838, 0.5454, 0.6270, 0.6168, 0.6717, 0.6484, 0.5681, 0.4922]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [7, 20]: assert_run_mixture(steps, 0.33, EulerDiscreteScheduler) assert_run_mixture(steps, 0.33, 
HeunDiscreteScheduler) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [5, 8, 20]: for split in [0.33, 0.49, 0.71]: for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split, scheduler_cls) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = 
num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert ( expected_steps_1 == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert ( expected_steps == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 
inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = sd_pipe._encode_vae_image(image, generator=generator) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = sd_pipe._encode_vae_image(masked_image, generator=generator) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_stable_diffusion_xl_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - 
image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 5 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4)
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py", "repo_id": "diffusers", "token_count": 15654 }
136
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    numpy_cosine_similarity_distance,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(4, 8),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=4,
            attention_head_dim=4,
            norm_num_groups=2,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(8,),
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=32,
            norm_num_groups=2,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=4,
            intermediate_size=16,
            layer_norm_eps=1e-05,
            num_attention_heads=2,
            num_hidden_layers=2,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames

        image_slice = frames[0][0][-3:, -3:, -1]

        assert frames[0][0].shape == (32, 32, 3)
        expected_slice = np.array([0.7537, 0.1752, 0.6157, 0.5508, 0.4240, 0.4110, 0.4838, 0.5648, 0.5094])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", reason="Feature isn't heavily used. Test in CUDA environment only.")
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
@require_torch_gpu
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to(torch_device)

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").frames
        assert numpy_cosine_similarity_distance(expected_video.flatten(), video_frames.flatten()) < 1e-4

    def test_two_step_model_with_freeu(self):
        expected_video = []

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to(torch_device)

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").frames
        video = video_frames[0, 0, -3:, -3:, -1].flatten()

        expected_video = [0.3643, 0.3455, 0.3831, 0.3923, 0.2978, 0.3247, 0.3278, 0.3201, 0.3475]

        assert np.abs(expected_video - video).mean() < 5e-2
diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py/0
{ "file_path": "diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py", "repo_id": "diffusers", "token_count": 3407 }
137
import torch from diffusers import DDIMInverseScheduler from .test_schedulers import SchedulerCommonTest class DDIMInverseSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDIMInverseScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps, torch.LongTensor([1, 201, 401, 601, 801])) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_timestep_spacing(self): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=timestep_spacing) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_add_noise_device(self): pass def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 671.6816) < 1e-2 assert abs(result_mean.item() - 0.8746) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 1394.2185) < 1e-2 assert abs(result_mean.item() - 1.8154) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = 
torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 539.9622) < 1e-2 assert abs(result_mean.item() - 0.7031) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 542.6722) < 1e-2 assert abs(result_mean.item() - 0.7066) < 1e-3
diffusers/tests/schedulers/test_scheduler_ddim_inverse.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ddim_inverse.py", "repo_id": "diffusers", "token_count": 2258 }
138
import tempfile from typing import Dict, List, Tuple import torch from diffusers import LCMScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class LCMSchedulerTest(SchedulerCommonTest): scheduler_classes = (LCMScheduler,) forward_default_kwargs = (("num_inference_steps", 10),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.00085, "beta_end": 0.0120, "beta_schedule": "scaled_linear", "prediction_type": "epsilon", } config.update(**kwargs) return config @property def default_valid_timestep(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timestep = scheduler.timesteps[-1] return timestep def test_timesteps(self): for timesteps in [100, 500, 1000]: # 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]: self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( time_step=self.default_valid_timestep, thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): # Get default timestep schedule. 
kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timesteps = scheduler.timesteps for t in timesteps: self.check_over_forward(time_step=t) def test_inference_steps(self): # Hardcoded for now for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) # Override test_add_noise_device because the hardcoded num_inference_steps of 100 doesn't work # for LCMScheduler under default settings def test_add_noise_device(self, num_inference_steps=10): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) sample = self.dummy_sample.to(torch_device) scaled_sample = scheduler.scale_model_input(sample, 0.0) self.assertEqual(sample.shape, scaled_sample.shape) noise = torch.randn_like(scaled_sample).to(torch_device) t = scheduler.timesteps[5][None] noised = scheduler.add_noise(scaled_sample, noise, t) self.assertEqual(noised.shape, scaled_sample.shape) # Override test_from_save_pretrained because it hardcodes a timestep of 1 def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: timestep = self.default_valid_timestep scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) scheduler.set_timesteps(num_inference_steps) new_scheduler.set_timesteps(num_inference_steps) kwargs["generator"] = torch.manual_seed(0) output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample kwargs["generator"] = torch.manual_seed(0) new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" # Override test_step_shape because uses 0 and 1 as hardcoded timesteps def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample scheduler.set_timesteps(num_inference_steps) timestep_0 = scheduler.timesteps[-2] timestep_1 = scheduler.timesteps[-1] output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) # Override test_set_scheduler_outputs_equivalence since it uses 0 as a hardcoded timestep def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for 
tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", 50) timestep = self.default_valid_timestep for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample scheduler.set_timesteps(num_inference_steps) kwargs["generator"] = torch.manual_seed(0) outputs_dict = scheduler.step(residual, timestep, sample, **kwargs) scheduler.set_timesteps(num_inference_steps) kwargs["generator"] = torch.manual_seed(0) outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple, outputs_dict) def full_loop(self, num_inference_steps=10, seed=0, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(seed) scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, generator).prev_sample return sample def test_full_loop_onestep(self): sample = self.full_loop(num_inference_steps=1) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) # TODO: get expected sum and mean assert abs(result_sum.item() - 18.7097) < 1e-3 assert abs(result_mean.item() - 0.0244) < 1e-3 def test_full_loop_multistep(self): sample = self.full_loop(num_inference_steps=10) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) # TODO: get expected sum and mean assert abs(result_sum.item() - 197.7616) < 1e-3 assert abs(result_mean.item() - 0.2575) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = 
[100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ): scheduler.set_timesteps(timesteps=timesteps)
diffusers/tests/schedulers/test_scheduler_lcm.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_lcm.py", "repo_id": "diffusers", "token_count": 5668 }
139
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that sorts the imports in the custom inits of Diffusers. Diffusers uses init files that delay the import of an object to when it's actually needed. This is to avoid the main init importing all models, which would make the line `import transformers` very slow when the user has all optional dependencies installed. The inits with delayed imports have two halves: one defining a dictionary `_import_structure` which maps modules to the name of the objects in each module, and one in `TYPE_CHECKING` which looks like a normal init for type-checkers. `isort` or `ruff` properly sort the second half which looks like traditionl imports, the goal of this script is to sort the first half. Use from the root of the repo with: ```bash python utils/custom_init_isort.py ``` which will auto-sort the imports (used in `make style`). For a check only (as used in `make quality`) run: ```bash python utils/custom_init_isort.py --check_only ``` """ import argparse import os import re from typing import Any, Callable, List, Optional # Path is defined with the intent you should run this script from the root of the repo. PATH_TO_TRANSFORMERS = "src/diffusers" # Pattern that looks at the indentation in a line. _re_indent = re.compile(r"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. _re_direct_key = re.compile(r'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. _re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _re_bracket_content = re.compile(r"\[([^\]]+)\]") def get_indent(line: str) -> str: """Returns the indent in given line (as string).""" search = _re_indent.search(line) return "" if search is None else search.groups()[0] def split_code_in_indented_blocks( code: str, indent_level: str = "", start_prompt: Optional[str] = None, end_prompt: Optional[str] = None ) -> List[str]: """ Split some code into its indented blocks, starting at a given level. Args: code (`str`): The code to split. indent_level (`str`): The indent level (as string) to use for identifying the blocks to split. start_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is. end_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is. Warning: The text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code` can thus be retrieved by joining the result. Returns: `List[str]`: The list of blocks. """ # Let's split the code into lines and move to start_index. 
index = 0 lines = code.split("\n") if start_prompt is not None: while not lines[index].startswith(start_prompt): index += 1 blocks = ["\n".join(lines[:index])] else: blocks = [] # This variable contains the block treated at a given time. current_block = [lines[index]] index += 1 # We split into blocks until we get to the `end_prompt` (or the end of the file). while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)): # We have a non-empty line with the proper indent -> start of a new block if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level: # Store the current block in the result and rest. There are two cases: the line is part of the block (like # a closing parenthesis) or not. if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "): # Line is part of the current block current_block.append(lines[index]) blocks.append("\n".join(current_block)) if index < len(lines) - 1: current_block = [lines[index + 1]] index += 1 else: current_block = [] else: # Line is not part of the current block blocks.append("\n".join(current_block)) current_block = [lines[index]] else: # Just add the line to the current block current_block.append(lines[index]) index += 1 # Adds current block if it's nonempty. if len(current_block) > 0: blocks.append("\n".join(current_block)) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(lines): blocks.append("\n".join(lines[index:])) return blocks def ignore_underscore_and_lowercase(key: Callable[[Any], str]) -> Callable[[Any], str]: """ Wraps a key function (as used in a sort) to lowercase and ignore underscores. """ def _inner(x): return key(x).lower().replace("_", "") return _inner def sort_objects(objects: List[Any], key: Optional[Callable[[Any], str]] = None) -> List[Any]: """ Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased last). Args: objects (`List[Any]`): The list of objects to sort. key (`Callable[[Any], str]`, *optional*): A function taking an object as input and returning a string, used to sort them by alphabetical order. If not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string). Returns: `List[Any]`: The sorted list with the same elements as in the inputs """ # If no key is provided, we use a noop. def noop(x): return x if key is None: key = noop # Constants are all uppercase, they go first. constants = [obj for obj in objects if key(obj).isupper()] # Classes are not all uppercase but start with a capital, they go second. classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()] # Functions begin with a lowercase, they go last. functions = [obj for obj in objects if not key(obj)[0].isupper()] # Then we sort each group. key1 = ignore_underscore_and_lowercase(key) return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1) def sort_objects_in_import(import_statement: str) -> str: """ Sorts the imports in a single import statement. Args: import_statement (`str`): The import statement in which to sort the imports. Returns: `str`: The same as the input, but with objects properly sorted. """ # This inner function sort imports between [ ]. def _replace(match): imports = match.groups()[0] # If there is one import only, nothing to do. 
if "," not in imports: return f"[{imports}]" keys = [part.strip().replace('"', "") for part in imports.split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: keys = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]" lines = import_statement.split("\n") if len(lines) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. idx = 2 if lines[1].strip() == "[" else 1 keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])] sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1]) sorted_lines = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:]) elif len(lines) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1]) is not None: lines[1] = _re_bracket_content.sub(_replace, lines[1]) else: keys = [part.strip().replace('"', "") for part in lines[1].split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: keys = keys[:-1] lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)]) return "\n".join(lines) else: # Finally we have to deal with imports fitting on one line import_statement = _re_bracket_content.sub(_replace, import_statement) return import_statement def sort_imports(file: str, check_only: bool = True): """ Sort the imports defined in the `_import_structure` of a given init. Args: file (`str`): The path to the init to check/fix. check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init. """ with open(file, encoding="utf-8") as f: code = f.read() # If the file is not a custom init, there is nothing to do. if "_import_structure" not in code: return # Blocks of indent level 0 main_blocks = split_code_in_indented_blocks( code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1, len(main_blocks) - 1): # Check if the block contains some `_import_structure`s thingy to sort. block = main_blocks[block_idx] block_lines = block.split("\n") # Get to the start of the imports. line_idx = 0 while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: line_idx = len(block_lines) else: line_idx += 1 if line_idx >= len(block_lines): continue # Ignore beginning and last line: they don't contain anything. internal_block_code = "\n".join(block_lines[line_idx:-1]) indent = get_indent(block_lines[1]) # Slit the internal block into blocks of indent level 1. internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent) # We have two categories of import key: list or _import_structure[key].append/extend pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks] # We only sort the lines with a key. 
keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None] sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. count = 0 reordered_blocks = [] for i in range(len(internal_blocks)): if keys[i] is None: reordered_blocks.append(internal_blocks[i]) else: block = sort_objects_in_import(internal_blocks[sorted_indices[count]]) reordered_blocks.append(block) count += 1 # And we put our main block back together with its first and last line. main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]]) if code != "\n".join(main_blocks): if check_only: return True else: print(f"Overwriting {file}.") with open(file, "w", encoding="utf-8") as f: f.write("\n".join(main_blocks)) def sort_imports_in_all_inits(check_only=True): """ Sort the imports defined in the `_import_structure` of all inits in the repo. Args: check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init. """ failures = [] for root, _, files in os.walk(PATH_TO_TRANSFORMERS): if "__init__.py" in files: result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only) if result: failures = [os.path.join(root, "__init__.py")] if len(failures) > 0: raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") args = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
diffusers/utils/custom_init_isort.py/0
{ "file_path": "diffusers/utils/custom_init_isort.py", "repo_id": "diffusers", "token_count": 5346 }
140
# Stable Diffusion Deep Dive <CourseFloatingBanner unit={3} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Stable Diffusion Deep Dive", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/en/unit3/stable_diffusion_deep_dive.ipynb"}, {label: "Stable Diffusion Deep Dive", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/en/unit3/stable_diffusion_deep_dive.ipynb"}, ]} /> Stable Diffusion is a powerful text-to-image model. There are various websites and tools to make using it as easy as possible. It is also integrated into the Huggingface diffusers library where generating images can be as simple as: ```py from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True).to("cuda") image = pipe("An astronaught scuba diving").images[0] ``` In this notebook we're going to dig into the code behind these easy-to-use interfaces, to see what is going on under the hood. We'll begin by re-creating the functionality above as a scary chunk of code, and then one by one we'll inspect the different components and figure out what they do. By the end of this notebook that same sampling loop should feel like something you can tweak and modify as you like. ## Setup & Imports You'll need to log into huggingface and accept the terms of the licence for this model - see the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. And when you first run this notebook you need to uncomment the following two cells to install the requirements and log in to huggingface with an access token. ```py # !pip install -q --upgrade transformers diffusers ftfy ``` ```py from base64 import b64encode import numpy import torch from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel from huggingface_hub import notebook_login # For video display: from IPython.display import HTML from matplotlib import pyplot as plt from pathlib import Path from PIL import Image from torch import autocast from torchvision import transforms as tfms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer, logging torch.manual_seed(1) if not (Path.home()/'.huggingface'/'token').exists(): notebook_login() # Supress some unnecessary warnings when loading the CLIPTextModel logging.set_verbosity_error() # Set device torch_device = "cuda" if torch.cuda.is_available() else "cpu" ``` ## Loading the models This code (and that in the next section) comes from the [Huggingface example notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb). This will download and set up the relevant models and components we'll be using. Let's just run this for now and move on to the next section to check that it all works before diving deeper. If you've loaded a pipeline, you can also access these components using `pipe.unet`, `pipe.vae` and so on. In this notebook we aren't doing any memory-saving tricks - if you find yourself running out of GPU RAM, look at the pipeline code for inspiration with things like attention slicing, switching to half precision (fp16), keeping the VAE on the CPU and other modifications. ```py # Load the autoencoder model which will be used to decode the latents into image space. 
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae") # Load the tokenizer and text encoder to tokenize and encode the text. tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14") # The UNet model for generating the latents. unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet") # The noise scheduler scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000) # To the GPU we go! vae = vae.to(torch_device) text_encoder = text_encoder.to(torch_device) unet = unet.to(torch_device); ``` ## A diffusion loop If all you want is to make a picture with some text, you could ignore this notebook and use one of the existing tools (such as [DreamStudio](https://beta.dreamstudio.ai/generate)) or use the simplified pipeline from huggingface, as documented [here](https://huggingface.co/blog/stable_diffusion). What we want to do in this notebook is dig a little deeper into how this works, so we'll start by checking that the example code runs. Again, this is adapted from the [HF notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb) and looks very similar to what you'll find if you inspect the [__call__() method](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L200) of the stable diffusion pipeline. ```py # Some settings prompt = ["A watercolor painting of an otter"] height = 512 # default height of Stable Diffusion width = 512 # default width of Stable Diffusion num_inference_steps = 30 # Number of denoising steps guidance_scale = 7.5 # Scale for classifier-free guidance generator = torch.manual_seed(32) # Seed generator to create the inital latent noise batch_size = 1 # Prep text text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") with torch.no_grad(): text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] max_length = text_input.input_ids.shape[-1] uncond_input = tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" ) with torch.no_grad(): uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # Prep Scheduler scheduler.set_timesteps(num_inference_steps) # Prep latents latents = torch.randn( (batch_size, unet.in_channels, height // 8, width // 8), generator=generator, ) latents = latents.to(torch_device) latents = latents * scheduler.init_noise_sigma # Scaling (previous versions did latents = latents * self.scheduler.sigmas[0] # Loop with autocast("cuda"): for i, t in tqdm(enumerate(scheduler.timesteps)): # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
latent_model_input = torch.cat([latents] * 2) sigma = scheduler.sigmas[i] # Scale the latents (preconditioning): # latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5) # Diffusers 0.3 and below latent_model_input = scheduler.scale_model_input(latent_model_input, t) # predict the noise residual with torch.no_grad(): noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 # latents = scheduler.step(noise_pred, i, latents)["prev_sample"] # Diffusers 0.3 and below latents = scheduler.step(noise_pred, t, latents).prev_sample # scale and decode the image latents with vae latents = 1 / 0.18215 * latents with torch.no_grad(): image = vae.decode(latents).sample # Display image = (image / 2 + 0.5).clamp(0, 1) image = image.detach().cpu().permute(0, 2, 3, 1).numpy() images = (image * 255).round().astype("uint8") pil_images = [Image.fromarray(image) for image in images] pil_images[0] ``` It's working, but that's quite a bit of code! Let's look at the components one by one. ## The Autoencoder (AE) The AE can 'encode' an image into some sort of latent representation, and decode this back into an image. I've wrapped the code for this into a couple of functions here so we can see what this looks like in action: ```py def pil_to_latent(input_im): # Single image -> single latent in a batch (so size 1, 4, 64, 64) with torch.no_grad(): latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device)*2-1) # Note scaling return 0.18215 * latent.latent_dist.sample() def latents_to_pil(latents): # bath of latents -> list of images latents = (1 / 0.18215) * latents with torch.no_grad(): image = vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.detach().cpu().permute(0, 2, 3, 1).numpy() images = (image * 255).round().astype("uint8") pil_images = [Image.fromarray(image) for image in images] return pil_images ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours."> </div> We'll use a pic from the web here, but you can load your own instead by uploading it and editing the filename in the next cell. 
```py
# Download a demo Image
!curl --output macaw.jpg 'https://lafeber.com/pet-birds/wp-content/uploads/2018/06/Scarlet-Macaw-2.jpg'
```

```py
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 62145  100 62145    0     0  10874      0  0:00:05  0:00:05 --:--:-- 15633
```

```py
# Load the image with PIL
input_image = Image.open('macaw.jpg').resize((512, 512))
input_image
```

<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

Encoding this into the latent space of the AE with the function defined above looks like this:

```py
# Encode to the latent space
encoded = pil_to_latent(input_image)
encoded.shape
```

```py
torch.Size([1, 4, 64, 64])
```

```py
# Let's visualize the four channels of this latent representation:
fig, axs = plt.subplots(1, 4, figsize=(16, 4))
for c in range(4):
    axs[c].imshow(encoded[0][c].cpu(), cmap='Greys')
```

<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

This 4x64x64 tensor captures lots of information about the image, hopefully enough that when we feed it through the decoder we get back something very close to our input image:

```py
# Decode this latent representation back into an image
decoded = latents_to_pil(encoded)[0]
decoded
```

<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

You'll see some small differences if you squint! Focus on the eye if you can't see anything obvious. This is pretty impressive - that 4x64x64 latent seems to hold a lot more information than a 64px image...

This autoencoder has been trained to squish an image down to a smaller representation and then re-create the image from this compressed version again. In this particular case the compression factor is 48: we start with a 3x512x512 (ch x ht x wd) image and it gets compressed to a 4x64x64 latent vector. Each 3x8x8 pixel volume in the input image gets compressed down to just 4 numbers (4x1x1). You can find AEs with a higher compression ratio (e.g. f16, like some popular VQGAN models) but at some point they begin to introduce artifacts that we don't want.

Why do we even use an autoencoder? We can do diffusion in pixel space - where the model gets all the image data as inputs and produces an output prediction of the same shape. But this means processing a LOT of data, and makes high-resolution generation very computationally expensive.
Some solutions to this involve doing diffusion at low resolution (64px for eg) and then training a separate model to upscale repeatedly (as with D2/Imagen). But latent diffusion instead does the diffusion process in this 'latent space', using the compressed representations from our AE rather than raw images. These representations are information rich, and can be small enough to handle manageably on consumer hardware. Once we've generated a new 'image' as a latent representation, the autoencoder can take those final latent outputs and turn them into actual pixels. ## The Scheduler Now we need to talk about adding noise... During training, we add some noise to an image an then have the model try to predict the noise. If we always added a ton of noise, the model might not have much to work with. If we only add a tiny amount, the model won't be able to do much with the random starting points we use for sampling. So during training the amount is varied, according to some distribution. During sampling, we want to 'denoise' over a number of steps. How many steps and how much noise we should aim for at each step are going to affect the final result. The scheduler is in charge of handling all of these details. For example: `scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)` sets up a scheduler that matches the one used to train this model. When we want to sample over a smaller number of steps, we set this up with scheduler.set_timesteps: ```py # Setting the number of sampling steps: scheduler.set_timesteps(15) ``` You can see how our new set of steps corresponds to those used in training: ```py # See these in terms of the original 1000 steps used for training: print(scheduler.timesteps) ``` ```py tensor([999.0000, 927.6429, 856.2857, 784.9286, 713.5714, 642.2143, 570.8571, 499.5000, 428.1429, 356.7857, 285.4286, 214.0714, 142.7143, 71.3571, 0.0000], dtype=torch.float64) ``` And how much noise is present at each: ```py # Look at the equivalent noise levels: print(scheduler.sigmas) ``` ```py tensor([14.6146, 9.6826, 6.6780, 4.7746, 3.5221, 2.6666, 2.0606, 1.6156, 1.2768, 1.0097, 0.7913, 0.6056, 0.4397, 0.2780, 0.0292, 0.0000]) ``` During sampling, we'll start at a high noise level (in fact, our input will be pure noise) and gradually 'denoise' down to an image, according to this schedule. ```py # Plotting this noise schedule: plt.plot(scheduler.sigmas) plt.title('Noise Schedule') plt.xlabel('Sampling step') plt.ylabel('sigma') plt.show() ``` This 'sigma' is the amount of noise added to the latent representation. 
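To make the schedule concrete, here is a small sketch (using the `scheduler` already configured above; the print formatting is just an arbitrary choice) that pairs each sampling step with the sigma that will be used at that step:

```py
# Pair each of the 15 sampling steps with the noise level (sigma) used at that step.
# Note: scheduler.sigmas has one extra trailing 0.0 entry for the final, fully denoised state.
for step, (t, sigma) in enumerate(zip(scheduler.timesteps, scheduler.sigmas)):
    print(f"step {step:2d}: timestep {float(t):7.1f}, sigma {float(sigma):8.4f}")
```

Early steps sit at a high sigma (lots of noise still to remove), and the noise level drops towards zero as sampling progresses.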
Let's visualize what this looks like by adding a bit of noise to our encoded image and then decoding this noised version: ```py noise = torch.randn_like(encoded) # Random noise sampling_step = 10 # Equivalent to step 10 out of 15 in the schedule above # encoded_and_noised = scheduler.add_noise(encoded, noise, timestep) # Diffusers 0.3 and below encoded_and_noised = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[sampling_step]])) latents_to_pil(encoded_and_noised.float())[0] # Display ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours."> </div> What does this look like at different timesteps? Experiment and see for yourself! If you uncomment the cell below you'll see that in this case the `scheduler.add_noise` function literally just adds noise scaled by sigma: `noisy_samples = original_samples + noise * sigmas` ```py # ??scheduler.add_noise ``` Other diffusion models may be trained with different noising and scheduling approaches, some of which keep the variance fairly constant across noise levels ('variance preserving') with different scaling and mixing tricks instead of having noisy latents with higher and higher variance as more noise is added ('variance exploding'). If we want to start from random noise instead of a noised image, we need to scale it by the largest sigma value used during training, ~14 in this case. And before these noisy latents are fed to the model they are scaled again in the so-called pre-conditioning step: `latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)` (now handled by `latent_model_input = scheduler.scale_model_input(latent_model_input, t)`). Again, this scaling/pre-conditioning differs between papers and implementations, so keep an eye out for this if you work with a different type of diffusion model. ## Loop starting from noised version of input (AKA image2image) Let's see what happens when we use our image as a starting point, adding some noise and then doing the final few denoising steps in the loop with a new prompt. We'll use a similar loop to the first demo, but we'll skip the first `start_step` steps. To noise our image we'll use code like that shown above, using the scheduler to noise it to a level equivalent to step 10 (`start_step`). 
```py
# Settings (same as before except for the new prompt)
prompt = ["A colorful dancer, nat geo photo"]
height = 512                        # default height of Stable Diffusion
width = 512                         # default width of Stable Diffusion
num_inference_steps = 50            # Number of denoising steps
guidance_scale = 8                  # Scale for classifier-free guidance
generator = torch.manual_seed(32)   # Seed generator to create the initial latent noise
batch_size = 1

# Prep text (same as before)
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
    text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
    [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
    uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

# Prep Scheduler (setting the number of inference steps)
scheduler.set_timesteps(num_inference_steps)

# Prep latents (noising appropriately for start_step)
start_step = 10
start_sigma = scheduler.sigmas[start_step]
noise = torch.randn_like(encoded)
latents = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[start_step]]))
latents = latents.to(torch_device).float()

# Loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
    if i >= start_step:  # << This is the only modification to the loop we do

        # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # perform guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # compute the previous noisy sample x_t -> x_t-1
        latents = scheduler.step(noise_pred, t, latents).prev_sample

latents_to_pil(latents)[0]
```

<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

You can see that some colours and structure from the image are kept, but we now have a new picture! The more noise you add and the more steps you do, the further away it gets from the input image. This is how the popular img2img pipeline works. Again, if this is your end goal there are tools to make this easy! But you can see that under the hood this is the same as the generation loop, just skipping the first few steps and starting from a noised image rather than pure noise.

Explore changing how many steps are skipped and see how this affects the amount the image changes from the input.

## Exploring the text -> embedding pipeline

We use a text encoder model to turn our text into a set of 'embeddings' which are fed to the diffusion model as conditioning. Let's follow a piece of text through this process and see how it works.
```py # Our text prompt prompt = 'A picture of a puppy' ``` We begin with tokenization: ```py # Turn the text into a sequnce of tokens: text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") text_input['input_ids'][0] # View the tokens ``` ```py tensor([49406, 320, 1674, 539, 320, 6829, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407]) ``` ```py # See the individual tokens for t in text_input['input_ids'][0][:8]: # We'll just look at the first 7 to save you from a wall of '<|endoftext|>' print(t, tokenizer.decoder.get(int(t))) ``` ```py tensor(49406) <|startoftext|> tensor(320) a</w> tensor(1674) picture</w> tensor(539) of</w> tensor(320) a</w> tensor(6829) puppy</w> tensor(49407) <|endoftext|> tensor(49407) <|endoftext|> ``` We can jump straight to the final (output) embeddings like so: ```py # Grab the output embeddings output_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] print('Shape:', output_embeddings.shape) output_embeddings ``` ```py Shape: torch.Size([1, 77, 768]) tensor([[[-0.3884, 0.0229, -0.0522, ..., -0.4899, -0.3066, 0.0675], [ 0.0290, -1.3258, 0.3085, ..., -0.5257, 0.9768, 0.6652], [ 0.6942, 0.3538, 1.0991, ..., -1.5716, -1.2643, -0.0121], ..., [-0.0221, -0.0053, -0.0089, ..., -0.7303, -1.3830, -0.3011], [-0.0062, -0.0246, 0.0065, ..., -0.7326, -1.3745, -0.2953], [-0.0536, 0.0269, 0.0444, ..., -0.7159, -1.3634, -0.3075]]], device='cuda:0', grad_fn=<NativeLayerNormBackward0>) ``` We pass our tokens through the text_encoder and we magically get some numbers we can feed to the model. How are these generated? The tokens are transformed into a set of input embeddings, which are then fed through the transformer model to get the final output embeddings. To get these input embeddings, there are actually two steps - as revealed by inspecting `text_encoder.text_model.embeddings`: ```py text_encoder.text_model.embeddings ``` ```py CLIPTextEmbeddings( (token_embedding): Embedding(49408, 768) (position_embedding): Embedding(77, 768) ) ``` ## Token embeddings The token is fed to the `token_embedding` to transform it into a vector. The function name `get_input_embeddings` here is misleading since these token embeddings need to be combined with the position embeddings before they are actually used as inputs to the model! Anyway, let's look at just the token embedding part first We can look at the embedding layer: ```py # Access the embedding layer token_emb_layer = text_encoder.text_model.embeddings.token_embedding token_emb_layer # Vocab size 49408, emb_dim 768 ``` ```py Embedding(49408, 768) ``` And embed a token like so: ```py # Embed a token - in this case the one for 'puppy' embedding = token_emb_layer(torch.tensor(6829, device=torch_device)) embedding.shape # 768-dim representation ``` ```py torch.Size([768]) ``` This single token has been mapped to a 768-dimensional vector - the token embedding. 
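As a quick aside (not something the diffusion pipeline itself requires), we can sanity-check that these token embeddings carry some meaning by comparing the 'puppy' embedding with a few other words using cosine similarity. This is only a sketch: the comparison words are arbitrary choices, and the token ids are looked up with the tokenizer rather than hardcoded:

```py
import torch.nn.functional as F

def token_embedding_for(word):
    # Encode the word without the <|startoftext|>/<|endoftext|> special tokens and embed its first token
    token_id = tokenizer.encode(word, add_special_tokens=False)[0]
    return token_emb_layer(torch.tensor(token_id, device=torch_device))

puppy_emb = token_embedding_for('puppy')
for word in ['dog', 'kitten', 'bicycle']:
    sim = F.cosine_similarity(puppy_emb, token_embedding_for(word), dim=0)
    print(f"{word}: {float(sim):.3f}")
```

If the embedding space is doing its job, related words should typically score higher than unrelated ones - one hint that these vectors are a useful starting representation for the transformer.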
We can do the same with all of the tokens in the prompt to get all the token embeddings: ```py token_embeddings = token_emb_layer(text_input.input_ids.to(torch_device)) print(token_embeddings.shape) # batch size 1, 77 tokens, 768 values for each token_embeddings ``` ```py torch.Size([1, 77, 768]) tensor([[[ 0.0011, 0.0032, 0.0003, ..., -0.0018, 0.0003, 0.0019], [ 0.0013, -0.0011, -0.0126, ..., -0.0124, 0.0120, 0.0080], [ 0.0235, -0.0118, 0.0110, ..., 0.0049, 0.0078, 0.0160], ..., [ 0.0012, 0.0077, -0.0011, ..., -0.0015, 0.0009, 0.0052], [ 0.0012, 0.0077, -0.0011, ..., -0.0015, 0.0009, 0.0052], [ 0.0012, 0.0077, -0.0011, ..., -0.0015, 0.0009, 0.0052]]], device='cuda:0', grad_fn=<EmbeddingBackward0>) ``` ## Positional Embeddings Positional embeddings tell the model where in a sequence a token is. Much like the token embedding, this is a set of (optionally learnable) parameters. But now instead of dealing with ~50k tokens we just need one for each position (77 total): ```py pos_emb_layer = text_encoder.text_model.embeddings.position_embedding pos_emb_layer ``` ```py Embedding(77, 768) ``` We can get the positional embedding for each position: ```py position_ids = text_encoder.text_model.embeddings.position_ids[:, :77] position_embeddings = pos_emb_layer(position_ids) print(position_embeddings.shape) position_embeddings ``` ```py torch.Size([1, 77, 768]) tensor([[[ 0.0016, 0.0020, 0.0002, ..., -0.0013, 0.0008, 0.0015], [ 0.0042, 0.0029, 0.0002, ..., 0.0010, 0.0015, -0.0012], [ 0.0018, 0.0007, -0.0012, ..., -0.0029, -0.0009, 0.0026], ..., [ 0.0216, 0.0055, -0.0101, ..., -0.0065, -0.0029, 0.0037], [ 0.0188, 0.0073, -0.0077, ..., -0.0025, -0.0009, 0.0057], [ 0.0330, 0.0281, 0.0289, ..., 0.0160, 0.0102, -0.0310]]], device='cuda:0', grad_fn=<EmbeddingBackward0>) ``` ## Combining token and position embeddings Time to combine the two. How do we do this? Just add them! Other approaches are possible but for this model this is how it is done. Combining them in this way gives us the final input embeddings ready to feed through the transformer model: ```py # And combining them we get the final input embeddings input_embeddings = token_embeddings + position_embeddings print(input_embeddings.shape) input_embeddings ``` ```py torch.Size([1, 77, 768]) tensor([[[ 2.6770e-03, 5.2133e-03, 4.9323e-04, ..., -3.1321e-03, 1.0659e-03, 3.4316e-03], [ 5.5371e-03, 1.7510e-03, -1.2381e-02, ..., -1.1410e-02, 1.3508e-02, 6.8378e-03], [ 2.5356e-02, -1.1019e-02, 9.7663e-03, ..., 1.9460e-03, 6.8375e-03, 1.8573e-02], ..., [ 2.2781e-02, 1.3262e-02, -1.1241e-02, ..., -8.0054e-03, -2.0560e-03, 8.9366e-03], [ 2.0026e-02, 1.5015e-02, -8.7638e-03, ..., -4.0313e-03, 1.8487e-05, 1.0885e-02], [ 3.4206e-02, 3.5826e-02, 2.7768e-02, ..., 1.4465e-02, 1.1110e-02, -2.5745e-02]]], device='cuda:0', grad_fn=<AddBackward0>) ``` We can check that these are the same as the result we'd get from `text_encoder.text_model.embeddings`: ```py # The following combines all the above steps (but doesn't let us fiddle with them!) 
text_encoder.text_model.embeddings(text_input.input_ids.to(torch_device))
```

```py
tensor([[[ 2.6770e-03,  5.2133e-03,  4.9323e-04,  ..., -3.1321e-03,
           1.0659e-03,  3.4316e-03],
         [ 5.5371e-03,  1.7510e-03, -1.2381e-02,  ..., -1.1410e-02,
           1.3508e-02,  6.8378e-03],
         [ 2.5356e-02, -1.1019e-02,  9.7663e-03,  ...,  1.9460e-03,
           6.8375e-03,  1.8573e-02],
         ...,
         [ 2.2781e-02,  1.3262e-02, -1.1241e-02,  ..., -8.0054e-03,
          -2.0560e-03,  8.9366e-03],
         [ 2.0026e-02,  1.5015e-02, -8.7638e-03,  ..., -4.0313e-03,
           1.8487e-05,  1.0885e-02],
         [ 3.4206e-02,  3.5826e-02,  2.7768e-02,  ...,  1.4465e-02,
           1.1110e-02, -2.5745e-02]]], device='cuda:0',
       grad_fn=<AddBackward0>)
```

## Feeding these through the transformer model

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

We want to mess with these input embeddings (specifically the token embeddings) before we send them through the rest of the model, but first we should check that we know how to do that. I read the code of the text_encoder's `forward` method, and based on that, the code of the `forward` method of the text_model that the text_encoder wraps. To inspect it yourself, type `??text_encoder.text_model.forward` and you'll get the function info and source code - a useful debugging trick!

Anyway, based on that we can copy in the bits we need to get the so-called 'last hidden state' and thus generate our final embeddings:

```py
def get_output_embeds(input_embeddings):
    # CLIP's text model uses a causal mask, so we prepare it here:
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    # Getting the output embeddings involves calling the model and passing output_hidden_states=True
    # so that it doesn't just return the pooled final predictions:
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None, # We aren't using an attention mask so that can be None
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True, # We want the output embs not the final output
        return_dict=None,
    )

    # We're interested in the output hidden state only
    output = encoder_outputs[0]

    # There is a final layer norm we need to pass these through
    output = text_encoder.text_model.final_layer_norm(output)

    # And now they're ready!
    return output

out_embs_test = get_output_embeds(input_embeddings) # Feed through the model with our new function
print(out_embs_test.shape) # Check the output shape
out_embs_test # Inspect the output
```

```py
torch.Size([1, 77, 768])
tensor([[[-0.3884,  0.0229, -0.0522,  ..., -0.4899, -0.3066,  0.0675],
         [ 0.0290, -1.3258,  0.3085,  ..., -0.5257,  0.9768,  0.6652],
         [ 0.6942,  0.3538,  1.0991,  ..., -1.5716, -1.2643, -0.0121],
         ...,
         [-0.0221, -0.0053, -0.0089,  ..., -0.7303, -1.3830, -0.3011],
         [-0.0062, -0.0246,  0.0065,  ..., -0.7326, -1.3745, -0.2953],
         [-0.0536,  0.0269,  0.0444,  ..., -0.7159, -1.3634, -0.3075]]],
       device='cuda:0', grad_fn=<NativeLayerNormBackward0>)
```

Note that these match the `output_embeddings` we saw near the start - we've figured out how to split up that one step ("get the text embeddings") into multiple sub-steps ready for us to modify.

Now that we have this process in place, we can replace the input embedding of a token with a new one of our choice - which in our final use-case will be something we learn. To demonstrate the concept though, let's replace the input embedding for 'puppy' in the prompt we've been playing with with the embedding for token 2368, get a new set of output embeddings based on this, and use these to generate an image to see what we get:

```py
prompt = 'A picture of a puppy'

# Tokenize
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)

# Get token embeddings
token_embeddings = token_emb_layer(input_ids)

# The new embedding. In this case just the input embedding of token 2368...
replacement_token_embedding = text_encoder.get_input_embeddings()(torch.tensor(2368, device=torch_device))

# Insert this into the token embeddings
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)

# Combine with pos embs
input_embeddings = token_embeddings + position_embeddings

# Feed through to get final output embs
modified_output_embeddings = get_output_embeds(input_embeddings)

print(modified_output_embeddings.shape)
modified_output_embeddings
```

```py
torch.Size([1, 77, 768])
tensor([[[-0.3884,  0.0229, -0.0522,  ..., -0.4899, -0.3066,  0.0675],
         [ 0.0290, -1.3258,  0.3085,  ..., -0.5257,  0.9768,  0.6652],
         [ 0.6942,  0.3538,  1.0991,  ..., -1.5716, -1.2643, -0.0121],
         ...,
         [-0.6034, -0.5322,  0.0629,  ..., -0.3964,  0.0877, -0.9558],
         [-0.5936, -0.5407,  0.0731,  ..., -0.3876,  0.0906, -0.9436],
         [-0.6393, -0.4703,  0.1103,  ..., -0.3904,  0.1351, -0.9726]]],
       device='cuda:0', grad_fn=<NativeLayerNormBackward0>)
```

The first few are the same, the last aren't. Everything at and after the position of the token we're replacing will be affected. If all went well, we should see something other than a puppy when we use these to generate an image. And sure enough, we do!
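Before we generate, here's an optional sanity check for that claim (my addition, not part of the original notebook). It assumes the unmodified `output_embeddings` we computed near the start are still in memory:

```py
# Compare the modified and original output embeddings position by position.
# Thanks to the causal attention mask, only the position of 'puppy' (index 5)
# and the positions after it should have changed.
changed = ~torch.isclose(modified_output_embeddings, output_embeddings, atol=1e-4).all(dim=-1)
print(changed[0].nonzero().flatten())  # expected: 5, 6, 7, ..., 76
```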
```py
# Generating an image with these modified embeddings
def generate_with_embs(text_embeddings):
    height = 512                        # default height of Stable Diffusion
    width = 512                         # default width of Stable Diffusion
    num_inference_steps = 30            # Number of denoising steps
    guidance_scale = 7.5                # Scale for classifier-free guidance
    generator = torch.manual_seed(32)   # Seed generator to create the initial latent noise
    batch_size = 1

    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep Scheduler
    scheduler.set_timesteps(num_inference_steps)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Loop
    for i, t in tqdm(enumerate(scheduler.timesteps)):
        # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # perform guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # compute the previous noisy sample x_t -> x_t-1
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
```

```py
generate_with_embs(modified_output_embeddings)
```

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

Surprise! Now you know what token 2368 means ;)

What can we do with this? Why did we go to all of this trouble? Well, we'll see a more compelling use-case shortly, but the tl;dr is that once we can access and modify the token embeddings, we can do tricks like replacing them with something else. In the example we just did, that was just another token embedding from the model's vocabulary, equivalent to just editing the prompt. But we can also mix tokens - for example, here's a half-puppy-half-skunk:

```py
# In case you're wondering how to get the token for a word, or the embedding for a token:
prompt = 'skunk'
print('tokenizer(prompt):', tokenizer(prompt))
print('token_emb_layer([token_id]) shape:', token_emb_layer(torch.tensor([8797], device=torch_device)).shape)
```

```py
tokenizer(prompt): {'input_ids': [49406, 42194, 49407], 'attention_mask': [1, 1, 1]}
token_emb_layer([token_id]) shape: torch.Size([1, 768])
```

```py
prompt = 'A picture of a puppy'

# Tokenize
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)

# Get token embeddings
token_embeddings = token_emb_layer(input_ids)

# The new embedding,
# which is now a mixture of the token embeddings for 'puppy' and 'skunk':
puppy_token_embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
skunk_token_embedding = token_emb_layer(torch.tensor(42194, device=torch_device))
replacement_token_embedding = 0.5*puppy_token_embedding + 0.5*skunk_token_embedding

# Insert this into the token embeddings
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)

# Combine with pos embs
input_embeddings = token_embeddings + position_embeddings

# Feed through to get final output embs
modified_output_embeddings = get_output_embeds(input_embeddings)

# Generate an image with these
generate_with_embs(modified_output_embeddings)
```

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

## Textual Inversion

OK, so we can slip in a modified token embedding, and use this to generate an image. We used existing token embeddings (a mix of 'puppy' and 'skunk') in the examples above, but what if we could instead 'learn' a new token embedding for a specific concept? This is the idea behind 'Textual Inversion', in which a few example images are used to create a new token embedding:

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

Diagram from the textual inversion [blog post](https://textual-inversion.github.io/) - note it doesn't show the positional embeddings step, for simplicity.

We won't cover how this training works, but we can try loading one of these new 'concepts' from the [community-created SD concepts library](https://huggingface.co/sd-concepts-library) and see how it fits in with our example above. I'll use [https://huggingface.co/sd-concepts-library/birb-style](https://huggingface.co/sd-concepts-library/birb-style) since it was the first one I made :) Download the learned_embeds.bin file from there and upload the file to wherever this notebook is before running this next cell:

```py
birb_embed = torch.load('learned_embeds.bin')
birb_embed.keys(), birb_embed['<birb-style>'].shape
```

```py
(dict_keys(['<birb-style>']), torch.Size([768]))
```

We get a dictionary with a key (the special placeholder I used, `<birb-style>`) and the corresponding token embedding.
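As an aside (my addition, not part of the original notebook): the more usual way to use a concept like this is to register the placeholder as a brand-new token rather than hijacking an existing one. A rough sketch of how that might look with the `tokenizer` and `text_encoder` we already have loaded - this is roughly what the inference notebook mentioned below handles for you:

```py
# Sketch: add '<birb-style>' as a real token and copy in its learned embedding
tokenizer.add_tokens('<birb-style>')                  # returns 1 if the token was new
text_encoder.resize_token_embeddings(len(tokenizer))  # grow the embedding table by one row
# Note: this swaps in a fresh embedding layer, so `token_emb_layer` above would need re-fetching.
new_token_id = tokenizer.convert_tokens_to_ids('<birb-style>')
text_encoder.get_input_embeddings().weight.data[new_token_id] = birb_embed['<birb-style>'].to(torch_device)
# Now '<birb-style>' can be used directly in prompts, e.g. "A mouse in the style of <birb-style>"
```

For now though, let's stick with the more manual approach and keep swapping embeddings by hand.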
As in the previous example, let's replace the 'puppy' token embedding with this and see what happens: ```py prompt = 'A mouse in the style of puppy' # Tokenize text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") input_ids = text_input.input_ids.to(torch_device) # Get token embeddings token_embeddings = token_emb_layer(input_ids) # The new embedding - our special birb word replacement_token_embedding = birb_embed['<birb-style>'].to(torch_device) # Insert this into the token embeddings token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device) # Combine with pos embs input_embeddings = token_embeddings + position_embeddings # Feed through to get final output embs modified_output_embeddings = get_output_embeds(input_embeddings) # And generate an image with this: generate_with_embs(modified_output_embeddings) ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours."> </div> The token for 'puppy' was replaced with one that captures a particular style of painting, but it could just as easily represent a specific object or class of objects. Again, there is a [nice inference notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) from hf to make it easy to use the different concepts, that properly handles using the names in prompts ("A <cat-toy> in the style of <birb-style>") without worrying about all this manual stuff. The goal of this notebook is to pull back the curtain a bit so you know what is going on behind the scenes :) ## Messing with Embeddings Besides just replacing the token embedding of a single word, there are various other tricks we can try. For example, what if we create a 'chimera' by averaging the embeddings of two different prompts? ```py # Embed two prompts text_input1 = tokenizer(["A mouse"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") text_input2 = tokenizer(["A leopard"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") with torch.no_grad(): text_embeddings1 = text_encoder(text_input1.input_ids.to(torch_device))[0] text_embeddings2 = text_encoder(text_input2.input_ids.to(torch_device))[0] # Mix them together mix_factor = 0.35 mixed_embeddings = (text_embeddings1*mix_factor + \ text_embeddings2*(1-mix_factor)) # Generate! generate_with_embs(mixed_embeddings) ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours."> </div> ## The UNET and CFG Now it's time we looked at the actual diffusion model. This is typically a Unet that takes in the noisy latents (x) and predicts the noise. 
We use a conditional model that also takes in the timestep (t) and our text embedding (aka encoder_hidden_states) as conditioning. Feeding all of these into the model looks like this: `noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]` We can try it out and see what the output looks like: ```py # Prep Scheduler scheduler.set_timesteps(num_inference_steps) # What is our timestep t = scheduler.timesteps[0] sigma = scheduler.sigmas[0] # A noisy latent latents = torch.randn( (batch_size, unet.in_channels, height // 8, width // 8), generator=generator, ) latents = latents.to(torch_device) latents = latents * scheduler.init_noise_sigma # Text embedding text_input = tokenizer(['A macaw'], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") with torch.no_grad(): text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] # Run this through the unet to predict the noise residual with torch.no_grad(): noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"] latents.shape, noise_pred.shape # We get preds in the same shape as the input ``` ```py (torch.Size([1, 4, 64, 64]), torch.Size([1, 4, 64, 64])) ``` Given a set of noisy latents, the model predicts the noise component. We can remove this noise from the noisy latents to see what the output image looks like (`latents_x0 = latents - sigma * noise_pred`). And we can add most of the noise back to this predicted output to get the (slightly less noisy hopefully) input for the next diffusion step. To visualize this let's generate another image, saving both the predicted output (x0) and the next step (xt-1) after every step: ```py prompt = 'Oil painting of an otter in a top hat' height = 512 width = 512 num_inference_steps = 50 guidance_scale = 8 generator = torch.manual_seed(32) batch_size = 1 # Make a folder to store results !rm -rf steps/ !mkdir -p steps/ # Prep text text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") with torch.no_grad(): text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] max_length = text_input.input_ids.shape[-1] uncond_input = tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" ) with torch.no_grad(): uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # Prep Scheduler scheduler.set_timesteps(num_inference_steps) # Prep latents latents = torch.randn( (batch_size, unet.in_channels, height // 8, width // 8), generator=generator, ) latents = latents.to(torch_device) latents = latents * scheduler.init_noise_sigma # Loop for i, t in tqdm(enumerate(scheduler.timesteps)): # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
    latent_model_input = torch.cat([latents] * 2)
    sigma = scheduler.sigmas[i]
    latent_model_input = scheduler.scale_model_input(latent_model_input, t)

    # predict the noise residual
    with torch.no_grad():
        noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

    # perform guidance
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

    # Get the predicted x0:
    # latents_x0 = latents - sigma * noise_pred # Calculating ourselves
    latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample # Using the scheduler (Diffusers 0.4 and above)

    # compute the previous noisy sample x_t -> x_t-1
    latents = scheduler.step(noise_pred, t, latents).prev_sample

    # To PIL Images
    im_t0 = latents_to_pil(latents_x0)[0]
    im_next = latents_to_pil(latents)[0]

    # Combine the two images and save for later viewing
    im = Image.new('RGB', (1024, 512))
    im.paste(im_next, (0, 0))
    im.paste(im_t0, (512, 0))
    im.save(f'steps/{i:04}.jpeg')
```

```py
# Make and show the progress video (change width to 1024 for full res)
!ffmpeg -v 1 -y -f image2 -framerate 12 -i steps/%04d.jpeg -c:v libx264 -preset slow -qp 18 -pix_fmt yuv420p out.mp4
mp4 = open('out.mp4','rb').read()
data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
HTML("""
<video width=600 controls>
      <source src="%s" type="video/mp4">
</video>
""" % data_url)
```

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

The version on the right shows the predicted 'final output' (x0) at each step, and this is what is usually used for progress videos etc. The version on the left is the 'next step'. I found it interesting to compare the two - watching only the progress videos, you'd think drastic changes are happening, especially at the early stages, but since the changes made per step are relatively small, the actual process is much more gradual.

## Classifier-Free Guidance

By default, the model often doesn't do exactly what we ask. If we want it to follow the prompt better, we use a hack called Classifier-Free Guidance (CFG). There's a good explanation in the AI Coffee Break video on GLIDE. In the code, this comes down to us doing:

`noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`

This works surprisingly well :) Explore changing `guidance_scale` in the code above and see how this affects the results. How high can you push it before the results get worse?

## Sampling

There is still some complexity hidden from us inside `latents = scheduler.step(noise_pred, t, latents).prev_sample`. How exactly does the sampler go from the current noisy latents to a slightly less noisy version? Why don't we just use the model in a single step? Are there other ways to view this?

The model tries to predict the noise in an image. For low noise values, we assume it does a pretty good job. For higher noise levels, it has a hard task! So instead of producing a perfect image, the results tend to look like a blurry mess - see the start of the video above for a visual!
So, samplers use the model predictions to move a small amount towards the model prediction (removing some of the noise) and then get another prediction based on this marginally-less-rubbish input, and hope that this iteratively improves the result. Different samplers do this in different ways. You can try to inspect the code for the default LMS sampler with: ```py # ??scheduler.step ``` ## Guidance OK, final trick! How can we add some extra control to this generation process? At each step, we're going to use our model as before to predict the noise component of x. Then we'll use this to produce a predicted output image, and apply some loss function to this image. This function can be anything, but let's demo with a super simple example. If we want images that have a lot of blue, we can craft a loss function that gives a high loss if pixels have a low blue component: ```py def blue_loss(images): # How far are the blue channel values to 0.9: error = torch.abs(images[:,2] - 0.9).mean() # [:,2] -> all images in batch, only the blue channel return error ``` During each update step, we find the gradient of the loss with respect to the current noisy latents, and tweak them in the direction that reduces this loss as well as performing the normal update step: ```py prompt = 'A campfire (oil on canvas)' #@param height = 512 # default height of Stable Diffusion width = 512 # default width of Stable Diffusion num_inference_steps = 50 #@param # Number of denoising steps guidance_scale = 8 #@param # Scale for classifier-free guidance generator = torch.manual_seed(32) # Seed generator to create the inital latent noise batch_size = 1 blue_loss_scale = 200 #@param # Prep text text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") with torch.no_grad(): text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] # And the uncond. input as before: max_length = text_input.input_ids.shape[-1] uncond_input = tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" ) with torch.no_grad(): uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # Prep Scheduler scheduler.set_timesteps(num_inference_steps) # Prep latents latents = torch.randn( (batch_size, unet.in_channels, height // 8, width // 8), generator=generator, ) latents = latents.to(torch_device) latents = latents * scheduler.init_noise_sigma # Loop for i, t in tqdm(enumerate(scheduler.timesteps)): # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
    latent_model_input = torch.cat([latents] * 2)
    sigma = scheduler.sigmas[i]
    latent_model_input = scheduler.scale_model_input(latent_model_input, t)

    # predict the noise residual
    with torch.no_grad():
        noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

    # perform CFG
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

    ### ADDITIONAL GUIDANCE ###
    if i%5 == 0:
        # Requires grad on the latents
        latents = latents.detach().requires_grad_()

        # Get the predicted x0:
        # latents_x0 = latents - sigma * noise_pred
        latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample

        # Decode to image space
        denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)

        # Calculate loss
        loss = blue_loss(denoised_images) * blue_loss_scale

        # Occasionally print it out
        if i%10==0:
            print(i, 'loss:', loss.item())

        # Get gradient
        cond_grad = torch.autograd.grad(loss, latents)[0]

        # Modify the latents based on this gradient
        latents = latents.detach() - cond_grad * sigma**2

    # Now step with scheduler
    latents = scheduler.step(noise_pred, t, latents).prev_sample

latents_to_pil(latents)[0]
```

```py
0 loss: 182.02133178710938
10 loss: 43.55351257324219
20 loss: 15.30621337890625
30 loss: 9.746519088745117
40 loss: 8.846868515014648
```

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>

Tweak the scale (`blue_loss_scale`) - at low values, the image is mostly red and orange thanks to the prompt. At higher values, it is mostly bluish! Too high and we get a plain blue image.

Since this is slow, you'll notice that I only apply this loss once every 5 iterations - this was a suggestion from Jeremy, and we left it in because for this demo it saves time and still works. For your own tests, you may want to explore using a lower scale for the loss and applying it every iteration instead :)

NB: We should set requires_grad=True on the latents *before* the forward pass of the unet (i.e. remove the `torch.no_grad()` around the UNet call) if we want more accurate gradients. BUT this requires a lot of extra memory. You'll see both approaches used depending on whose implementation you're looking at.

Guiding with classifier models can give you images of a specific class. Guiding with a model like CLIP can help better match a text prompt. Guiding with a style loss can help add a particular style. Guiding with some sort of perceptual loss can force it towards the overall look of a target image. And so on.

## Conclusions

Hopefully now you have a slightly better idea of what is happening when you make an image with one of these models, and how you can modify the process in creative ways. I hope you're inspired to make something fun :)

This notebook was written by Jonathan Whitaker, adapted from [Grokking Stable Diffusion](https://colab.research.google.com/drive/1dlgggNa5Mz8sEAGU0wFCHhGLFooW_pf1?usp=sharing), which grew out of my early attempts to understand these components for myself. If you spot bugs or have questions, feel free to reach out to me @johnowhitaker :) Enjoy!
# Sprint ControlNet en JAX/Diffusers Bienvenue au sprint communautaire en JAX/Diffusers ! L'objectif de ce sprint est de travailler sur des modèles de diffusion amusants et créatifs en utilisant JAX et Diffusers. Lors de cet événement, nous créerons diverses applications avec des modèles de diffusion en JAX/Flax et Diffusers en utilisant des heures TPU gratuites généreusement fournies par Google Cloud. Ce document présente toutes les informations importantes pour faire une soumission au sprint. ## Organisation Les participants peuvent proposer des idées pour un projet intéressant impliquant des modèles de diffusion. Des équipes de 3 à 5 personnes seront ensuite formées autour des projets les plus prometteurs et les plus intéressants. Assurez-vous de lire la section Communication pour savoir comment proposer des projets, commenter les idées de projet des autres participants et créer une équipe. Pour aider chaque équipe à mener à bien son projet, nous organiserons des conférences données par des scientifiques et des ingénieurs de Google, de Hugging Face et de la communauté open source. Les conférences auront lieu le 17 avril. Assurez-vous d'assister aux conférences pour tirer le meilleur parti de votre participation ! Consultez la section Conférences pour avoir une vue d'ensemble des conférences, y compris l'orateur et l'heure de la conférence. Chaque équipe bénéficiera ensuite d'un **accès gratuit à une VM TPU v4-8** du 14 avril au 1er mai. De plus, nous fournirons un exemple d'entraînement en JAX/Flax et Diffusers pour entraîner un [ControlNet](https://huggingface.co/blog/controlnet) afin de lancer votre projet. Nous fournirons également des exemples sur la façon de préparer les jeux de données. Pendant le sprint, nous nous assurerons de répondre à toutes les questions que vous pourriez avoir sur JAX/Flax et Diffusers et nous aiderons chaque équipe autant que possible ! > Nous ne distribuerons pas de TPU pour les équipes composées d'un seul membre. Nous vous encourageons donc à rejoindre une équipe ou à trouver des coéquipiers pour votre idée. À la fin du sprint, chaque soumission sera évaluée par un jury et les trois meilleures démonstrations recevront un prix. Consultez la section Comment soumettre une démo pour plus d'informations et de suggestions sur la manière de soumettre votre projet. > 💡 Note : Même si nous fournissons un exemple pour entraîner ControlNet, les participants peuvent proposer des idées qui n'impliquent pas du tout un ControlNet du moment qu'elles sont centrées sur les modèles de diffusion. ## Dates importantes - **29/03** Annonce officielle de la semaine de la communauté. - **31/03** Commencez à former des groupes dans le canal #jax-diffusers-ideas sur Discord. - **10/04** Collecte des données. - **13/04 & 14/04 & 17/04** [Conférences de lancement sur YouTube](https://www.youtube.com/watch?v=SOj2sxgvFe0). - **14/04 à 17/04.** Début de l'accès aux TPU. - **01/05** Fermeture de l'accès aux TPU. - **08/05** : Annonce des 10 meilleurs projets et des prix. > 💡 Note : Nous accepterons les candidatures tout au long du sprint. ## Communication Toutes les communications importantes auront lieu sur notre serveur Discord. Rejoignez le serveur en utilisant [ce lien] (https://hf.co/join/discord). Après avoir rejoint le serveur, prenez le rôle Diffusers dans le canal `#role-assignment` et dirigez-vous vers le canal `#jax-diffusers-ideas` pour partager votre idée sous la forme d'un message de forum. 
Pour vous inscrire, remplissez le formulaire d'inscription et nous vous donnerons accès à deux canaux Discord supplémentaires pour les discussions et le support technique, ainsi qu'un accès aux TPU. Les annonces importantes de l'équipe Hugging Face, Flax/JAX et Google Cloud seront publiées sur le serveur. Le serveur Discord sera le lieu central où les participants pourront publier leurs résultats, partager leurs expériences d'apprentissage, poser des questions et obtenir une assistance technique pour les divers obstacles qu'ils rencontrent. Pour les problèmes liés à Flax/JAX, Diffusers, Datasets ou pour des questions spécifiques à votre projet, nous interagirons à travers les dépôts publics et les forums : - Flax: [Issues](https://github.com/google/flax/issues), [Questions](https://github.com/google/flax/discussions) - JAX: [Issues](https://github.com/google/jax/issues), [Questions](https://github.com/google/jax/discussions) - 🤗 Diffusers: [Issues](https://github.com/huggingface/diffusers/issues), [Questions](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) - 🤗 Datasets: [Issues](https://github.com/huggingface/datasets/issues), [Questions](https://discuss.huggingface.co/c/datasets/10) - Questions spécifiques aux projets : Elles peuvent être posées sur le canal #jax-diffusers-ideas sur Discord. - Questions relatives au TPU : Canal `#jax-diffusers-tpu-support` sur Discord. - Discussion générale : `#jax-diffusers-sprint channel` sur Discord. Vous aurez accès aux canaux `#jax-diffusers-tpu-support` et `#jax-diffusers-sprint` une fois que vous aurez été accepté pour participer au sprint. Lorsque vous demandez de l'aide, nous vous encourageons à poster le lien vers le [forum](https://discuss.huggingface.co) sur le serveur Discord, plutôt que de poster directement des *issues* ou des questions. De cette façon, nous nous assurons que tout le monde peut bénéficier de vos questions, même après la fin du sprint. > Note : Après le 10 avril, si vous vous êtes inscrit sur le formulaire Google, mais que vous n'êtes pas dans le canal Discord, veuillez laisser un message sur [l'annonce officielle du forum](https://discuss.huggingface.co/t/controlling-stable-diffusion-with-jax-and-diffusers-using-v4-tpus/35187/2) et envoyer un ping à `@mervenoyan`, `@sayakpaul`, et `@patrickvonplaten`. Il se peut que nous prenions un jour pour traiter ces demandes. ## Conférences Nous avons invité d'éminents chercheurs et ingénieurs de Google, Hugging Face, et de la communauté open-source qui travaillent dans le domaine de l'IA générative. Nous mettrons à jour cette section avec des liens vers les conférences, alors gardez un œil ici ou sur Discord dans le canal diffusion models core-announcements et programmez vos rappels ! 
### **13 avril 2023** | Intervenant | Sujet | Horaire | Video | |---|---|---|---| [Emiel Hoogeboom, Google Brain](https://twitter.com/emiel_hoogeboom?lang=en) | Pixel-Space Diffusion models for High Resolution Images | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | | [Apolinário Passos, Hugging Face](https://twitter.com/multimodalart?lang=en) | Introduction to Diffusers library | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | [Ting Chen, Google Brain](https://twitter.com/tingchenai?lang=en) | Diffusion++: discrete data and high-dimensional generation | 5.45pm-6.25pm CEST / 08.45am-09.25am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | ### **14 avril 2023** | Intervenant | Sujet | Horaire | Video | |---|---|---|---| | [Tim Salimans, Google Brain](https://twitter.com/timsalimans?lang=en) | Efficient image and video generation with distilled diffusion models | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | | [Suraj Patil, Hugging Face](https://twitter.com/psuraj28?lang=en) | Masked Generative Models: MaskGIT/Muse | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | | [Sabrina Mielke, John Hopkins University](https://twitter.com/sjmielke?lang=en) | From stateful code to purified JAX: how to build your neural net framework | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | ### **17 avril 2023** | Intervenant | Sujet | Horaire | Video | |---|---|---|---| | [Andreas Steiner, Google Brain](https://twitter.com/AndreasPSteiner) | JAX & ControlNet | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | | [Boris Dayma, craiyon](https://twitter.com/borisdayma?lang=en) | DALL-E Mini | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | | [Margaret Mitchell, Hugging Face](https://twitter.com/mmitchell_ai?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor) | Ethics of Text-to-Image | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | ## Données et prétraitement Dans cette section, nous verrons comment construire votre propre jeu de données pour entraîner ControlNet. ### Préparer un grand jeu de données local #### Monter un disque Si vous avez besoin d'espace supplémentaire, vous pouvez suivre [ce guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#prerequisites) pour créer un disque persistant, l'attacher à votre VM TPU et créer un répertoire pour monter le disque. Vous pouvez ensuite utiliser ce répertoire pour stocker votre jeu de données. Par ailleurs, la VM TPU attribuée à votre équipe dispose d'un disque de stockage persistant de 3 To. 
Pour apprendre à l'utiliser, consultez [ce guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#mount-pd). #### Prétraitement des données Nous montrons ici comment préparer un grand jeu de données pour entraîner un modèle ControlNet avec filtre de Canny. Plus précisément, nous fournissons un [exemple de script](./dataset_tools/coyo_1m_dataset_preprocess.py) qui : * Sélectionne 1 million de paires image-texte à partir d'un jeu de données existant [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m). * Télécharge chaque image et utilise le filtre de Canny pour générer l'image de conditionnement. * Crée un métafichier qui relie toutes les images et les images traitées à leurs légendes. Utilisez la commande suivante pour exécuter le script de prétraitement des données de l'exemple. Si vous avez monté un disque sur votre TPU, vous devez placer vos fichiers `train_data_dir` et `cache_dir` sur le disque monté. ```py python3 coyo_1m_dataset_preprocess.py \ --train_data_dir="/mnt/disks/persist/data" \ --cache_dir="/mnt/disks/persist" \ --max_train_samples=1000000 \ --num_proc=16 ``` Une fois le script exécuté, vous trouverez un dossier de données dans le répertoire `train_data_dir` spécifié avec la structure de dossier ci-dessous : ```py data ├── images │ ├── image_1.png │ ├── ....... │ └── image_1000000.jpeg ├── processed_images │ ├── image_1.png │ ├── ....... │ └── image_1000000.jpeg └── meta.jsonl ``` #### Charger un jeu de données Pour charger un jeu de données à partir du dossier de données que vous venez de créer, vous devez ajouter un script de chargement de jeu de données à votre dossier de données. Le script de chargement de données doit porter le même nom que le dossier. Par exemple, si votre dossier de données est `data`, vous devez ajouter un script de chargement de données nommé `data.py`. Nous fournissons un [exemple de script de chargement de données](./dataset_tools/data.py) que vous pouvez utiliser. Tout ce que vous avez à faire est de mettre à jour le `DATA_DIR` avec le chemin correct de votre dossier de données. Pour plus de détails sur l'écriture d'un script de chargement de données, reportez-vous à la [documentation] (https://huggingface.co/docs/datasets/dataset_script). Une fois que le script de chargement de données est ajouté à votre dossier de données, vous pouvez le charger avec : ```py dataset = load_dataset("/mnt/disks/persist/data", cache_dir="/mnt/disks/persist" ) ``` Notez que vous pouvez utiliser `--train_data_dir` pour passer le répertoire de votre dossier de données au script d'entraînement et générer votre jeu de données automatiquement pendant l'entraînement. Pour les grands jeux de données, nous recommandons de générer le jeu de données une seule fois et de le sauvegarder sur le disque à l'aide de la commande ```py dataset.save_to_disk("/mnt/disks/persist/dataset") ``` Vous pouvez ensuite réutiliser le jeu de données sauvegardé pour votre entraînement en passant `--load_from_disk`. Voici un exemple d'exécution d'un script d'entraînement qui chargera le jeu de données depuis le disque. 
```py export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="/mnt/disks/persist/canny_model" export DATASET_DIR="/mnt/disks/persist/dataset" export DISK_DIR="/mnt/disks/persist" python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --train_data_dir=$DATASET_DIR \ --load_from_disk \ --cache_dir=$DISK_DIR \ --resolution=512 \ --learning_rate=1e-5 \ --train_batch_size=2 \ --revision="non-ema" \ --from_pt \ --max_train_steps=500000 \ --checkpointing_steps=10000 \ --dataloader_num_workers=16 ``` ### Préparer un jeu de données avec MediaPipe et Hugging Face Nous fournissons un *notebook* ([ ![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/community-events/blob/main/jax-controlnet-sprint/dataset_tools/create_pose_dataset.ipynb)) qui vous montre comment préparer un jeu de données pour entraîner ControlNet en utilisant [MediaPipe](https://developers.google.com/mediapipe) et Hugging Face. Plus précisément, dans le *notebook*, nous montrons : * Comment tirer parti des solutions MediaPipe pour extraire les articulations du corps de la pose à partir des images d'entrée. * Prédire les légendes en utilisant BLIP-2 à partir des images d'entrée en utilisant 🤗 Transformers. * Construire et pousser le jeu de données final vers le Hugging Face Hub en utilisant 🤗 Datasets. Vous pouvez vous référer au *notebook* pour créer vos propres jeux de données en utilisant d'autres solutions MediaPipe. Ci-dessous, nous listons toutes les solutions pertinentes : * [Détection des points de repère de pose](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker) * [Détection de repères de visage](https://developers.google.com/mediapipe/solutions/vision/face_landmarker) * [Segmentation de selfie](https://developers.google.com/mediapipe/solutions/vision/image_segmenter) ## Entraîner ControlNet C'est peut-être la partie la plus amusante et la plus intéressante de ce document, car nous vous montrons ici comment entraîner un modèle ControlNet personnalisé. > 💡 Note : Pour ce sprint, vous n'êtes PAS limité à entraîner des ControlNets. Nous fournissons ce script d'entraînement comme référence pour vous permettre de démarrer. Pour un entraînement plus rapide sur les TPU et les GPU, vous pouvez tirer parti de l'exemple d'entraînement Flax. Suivez les instructions ci-dessus pour obtenir le modèle et le jeu de données avant d'exécuter le script. ### Mise en place de la VM TPU Avant de continuer avec le reste de cette section, vous devez vous assurer que l'adresse email que vous utilisez a été ajoutée au projet `hf-flax` sur Google Cloud Platform. Si ce n'est pas le cas, merci de nous le faire savoir sur le serveur Discord (vous pouvez taguer `@sayakpaul`, `@merve`, et `@patrickvonplaten`). Dans ce qui suit, nous allons décrire comment le faire en utilisant une console standard, mais vous devriez également être en mesure de vous connecter à la VM TPU via des IDE, comme Visual Studio Code, etc. 1. Vous devez installer le [Google Cloud SDK](https://cloud.google.com/sdk/docs/install). Veuillez suivre les instructions sur https://cloud.google.com/sdk. 2. Une fois le Google Cloud SDK installé, vous devez configurer votre compte en exécutant la commande suivante. Assurez-vous que <votre-adresse-email> correspond à l'adresse gmail que vous avez utilisée pour vous inscrire à cet événement. ```bash gcloud config set account <your-email-adress> ``` 3. 
Assurons-nous également que le bon projet est défini au cas où votre email serait utilisé pour plusieurs projets gcloud : ```bash gcloud config set project hf-flax ``` 4. Ensuite, vous devez vous authentifier. Vous pouvez le faire en exécutant la commande ```bash gcloud auth login ``` Vous devriez obtenir un lien vers un site web où vous pouvez authentifier votre compte gmail. 5. Enfin, vous pouvez établir un tunnel SSH dans la VM TPU ! Veuillez exécuter la commande suivante en réglant la "--zone" sur `us-central2-b` et sur le nom de la TPU qui vous a été envoyé par email par l'équipe de Hugging Face. ```bash gcloud alpha compute tpus tpu-vm ssh <tpu-name> --zone <zone> --project hf-flax ``` Cela devrait établir un tunnel SSH dans la VM TPU ! > 💡 Note : Vous n'êtes PAS supposé avoir accès à la console Google Cloud. Aussi, il se peut que vous ne receviez pas de lien d'invitation pour rejoindre le projet `hf-flax`. Mais vous devriez tout de même pouvoir accéder à la VM TPU en suivant les étapes ci-dessus . > Note : Les VM TPU sont déjà attachées à des disques de stockage persistants (de 3 TB). Cela sera utile au cas où votre équipe souhaiterait entraîner localement un jeu de données volumineux. Le nom du disque de stockage devrait également figurer dans l'e-mail que vous avez reçu. Suivez [cette section](https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint#mount-a-disk) pour plus de détails. ### Installation de JAX Commençons par créer un environnement virtuel Python : ```bash python3 -m venv <your-venv-name> ``` Nous pouvons activer l'environnement en lançant : ```bash source ~/<your-venv-name>/bin/activate ``` Installez ensuite Diffusers et les dépendances d'entraînement de la bibliothèque : ```bash pip install git+https://github.com/huggingface/diffusers.git ``` Ensuite, clonez ce dépôt et installez JAX, Flax et les autres dépendances : ```bash git clone https://github.com/huggingface/community-events cd community-events/jax-controlnet-sprint/training_scripts pip install -U -r requirements_flax.txt ``` Pour vérifier que JAX a été correctement installé, vous pouvez exécuter la commande suivante : ```py import jax jax.device_count() ``` Cela devrait afficher le nombre de cœurs de la TPU, qui devrait être de 4 sur une VM TPUv4-8. Si Python n'est pas capable de détecter le périphérique TPU, veuillez consulter la section des erreurs possibles plus bas pour des solutions. Si vous souhaitez utiliser le logging Weights and Biases, vous devez également installer `wandb` maintenant : ```bash pip install wandb ``` > 💡 Note : Weights & Biases est gratuit pour les étudiants, les éducateurs et les chercheurs universitaires. Tous les participants à notre événement sont qualifiés pour obtenir un compte d'équipe académique Weights & Biases. Pour créer votre équipe, vous pouvez visiter le site https://wandb.ai/create-team et choisir le type d'équipe "*Academic*". Pour plus d'informations sur la création et la gestion d'une équipe Weights & Biases, vous pouvez consulter le site https://docs.wandb.ai/guides/app/features/teams. 
### Exécution du script d'entraînement Maintenant, téléchargeons deux images de conditionnement que nous utiliserons pour lancer la validation pendant l'entraînement afin de suivre nos progrès ```bash wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` Nous vous encourageons à stocker ou à partager votre modèle avec la communauté. Pour utiliser le Hub, veuillez vous connecter à votre compte Hugging Face, ou ([en créer un](https://huggingface.co/docs/diffusers/main/en/training/hf.co/join) si vous n'en avez pas déjà un) : ```bash huggingface-cli login ``` Assurez-vous que les variables d'environnement `MODEL_DIR`, `OUTPUT_DIR` et `HUB_MODEL_ID` sont définies. Les variables `OUTPUT_DIR` et `HUB_MODEL_ID` spécifient où sauvegarder le modèle sur le Hub : ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="runs/fill-circle-{timestamp}" export HUB_MODEL_ID="controlnet-fill-circle" ``` Et enfin, démarrez l'entraînement (assurez-vous d'être dans le répertoire `jax-controlnet-sprint/training_scripts`) ! ```bash python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --validation_steps=1000 \ --train_batch_size=2 \ --revision="non-ema" \ --from_pt \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID \ --num_train_epochs=11 \ --push_to_hub \ --hub_model_id=$HUB_MODEL_ID ``` Notez que l'argument `--from_pt` convertira votre point de contrôle pytorch en flax. Cependant, il ne fonctionnera qu'avec les points de contrôle au format diffusers. Si votre `MODEL_DIR` ne contient pas de points de contrôle au format diffusers, vous ne pouvez pas utiliser l'argument `--from_pt`. Vous pouvez convertir vos points de contrôle `ckpt` ou `safetensors` au format diffusers en utilisant [ce script] (https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py). Puisque nous avons passé l'argument `--push_to_hub`, il va automatiquement créer un repo de modèle sous votre compte Hugging Face basé sur `$HUB_MODEL_ID`. À la fin de l'entraînement, le point de contrôle final sera automatiquement stocké sur le Hub. Vous pouvez trouver un exemple de modèle [ici](https://huggingface.co/YiYiXu/fill-circle-controlnet). Notre script d'entraînement fournit également un support limité pour le streaming de grands jeux de données à partir du Hub. Afin d'activer le streaming, il faut également définir `--max_train_samples`. 
Voici un exemple de commande (tiré de [cet article de blog](https://huggingface.co/blog/train-your-controlnet)) : ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="runs/uncanny-faces-{timestamp}" export HUB_MODEL_ID="controlnet-uncanny-faces" python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=multimodalart/facesyntheticsspigacaptioned \ --streaming \ --conditioning_image_column=spiga_seg \ --image_column=image \ --caption_column=image_caption \ --resolution=512 \ --max_train_samples 100000 \ --learning_rate=1e-5 \ --train_batch_size=1 \ --revision="flax" \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID ``` Notez cependant que les performances des TPUs peuvent être limitées car le streaming avec `datasets` n'est pas optimisé pour les images. Pour assurer un débit maximal, nous vous encourageons à explorer les options suivantes : * [Webdataset](https://webdataset.github.io/webdataset/) * [TorchData](https://github.com/pytorch/data) * [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds) Lorsque vous travaillez avec un jeu de données plus important, vous pouvez avoir besoin d'exécuter le processus d'entraînement pendant une longue période et il est utile d'enregistrer des points de contrôle réguliers au cours du processus. Vous pouvez utiliser l'argument suivant pour activer les points de contrôle intermédiaires : ```bash --checkpointing_steps=500 ``` Cela permet d'enregistrer le modèle entraîné dans des sous-dossiers du dossier output_dir. Le nom des sous-dossiers correspond au nombre d'étapes effectuées jusqu'à présent ; par exemple : un point de contrôle sauvegardé après 500 étapes d'entraînement serait sauvegardé dans un sous-dossier nommé 500 Vous pouvez alors commencer votre entraînement à partir de ce point de contrôle sauvegardé avec ```bash --controlnet_model_name_or_path="./control_out/500" ``` Nous soutenons l'entraînement avec la stratégie de pondération Min-SNR proposée dans [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) qui permet d'obtenir une convergence plus rapide en rééquilibrant la perte. Pour l'utiliser, il faut définir l'argument `--snr_gamma`. La valeur recommandée est `5.0`. Nous supportons également l'accumulation de gradient, technique qui vous permet d'utiliser une taille de batch plus grande que celle que votre machine serait normalement capable de mettre en mémoire. Vous pouvez utiliser l'argument `gradient_accumulation_steps` pour définir les étapes d'accumulation du gradient. L'auteur de ControlNet recommande d'utiliser l'accumulation de gradient pour obtenir une meilleure convergence. Pour en savoir plus voir [ici](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation). Vous pouvez **profiler votre code** avec : ```bash --profile_steps==5 ``` Reportez-vous à la [documentation JAX sur le profilage](https://jax.readthedocs.io/en/latest/profiling.html). Pour inspecter la trace de profil, vous devez installer et démarrer Tensorboard avec le plugin de profil : ```bash pip install tensorflow tensorboard-plugin-profile tensorboard --logdir runs/fill-circle-100steps-20230411_165612/ ``` Le profil peut alors être inspecté à l'adresse http://localhost:6006/#profile. 
Parfois vous obtiendrez des conflits de version (messages d'erreur comme `Duplicate plugins for name projector`), ce qui signifie que vous devez désinstaller et réinstaller toutes les versions de Tensorflow/Tensorboard (par exemple avec `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`). Notez que la fonctionnalité de débogage du plugin Tensorboard `profile` est toujours en cours de développement. Toutes les vues ne sont pas entièrement fonctionnelles, et par exemple le `trace_viewer` coupe les événements après 1M (ce qui peut résulter en la perte de toutes vos traces de périphériques si par exemple vous profilez l'étape de compilation par accident). ### Dépannage de votre VM TPU **TRES IMPORTANT** : Un seul processus peut accéder aux cœurs de la TPU à la fois. Cela signifie que si plusieurs membres de l'équipe essaient de se connecter aux cœurs de la TPU, vous obtiendrez des erreurs telles que : ``` libtpu.so already in used by another process. Not attempting to load libtpu.so in this process. ``` Nous recommandons à chaque membre de l'équipe de créer son propre environnement virtuel, mais une seule personne devrait exécuter les processus d'entraînement lourds. De plus, veuillez vous relayer lors de l'installation de la TPUv4-8 afin que tout le monde puisse vérifier que JAX est correctement installé. Si les membres de votre équipe n'utilisent pas actuellement la TPU mais que vous obtenez toujours ce message d'erreur. Vous devez tuer le processus qui utilise la TPU avec : ``` kill -9 PID ``` vous devrez remplacer le terme "PID" par le PID du processus qui utilise TPU. Dans la plupart des cas, cette information est incluse dans le message d'erreur. Par exemple, si vous obtenez ``` The TPU is already in use by a process with pid 1378725. Not attempting to load libtpu.so in this process. ``` vous pouvez faire ``` kill -9 1378725 ``` Vous pouvez également utiliser la commande suivante pour trouver les processus utilisant chacune des puces TPU (par exemple, `/dev/accel0` est l'une des puces TPU) ``` sudo lsof -w /dev/accel0 ``` Pour tuer tous les processus à l'aide de `/dev/accel0`, il faut ``` sudo lsof -t /dev/accel0 | xargs kill -9 ``` Si Python n'est pas capable de détecter votre périphérique TPU (i.e. quand vous faites `jax.device_count()` et qu'il sort `0`), cela peut être dû au fait que vous n'avez pas les droits d'accès aux logs tpu, ou que vous avez un fichier tpu lock qui traîne. Exécutez les commandes suivantes pour résoudre le problème ``` sudo rm -f /tmp/libtpu_lockfile ``` ``` sudo chmod o+w /tmp/tpu_logs/ ``` ## Comment faire une soumission Pour faire une soumission complète, vous devez avoir les éléments suivants sur le Hub d'Hugging Face : - Un dépôt de modèle avec les poids du modèle et la carte du modèle, - (Facultatif) Un dépôt de jeu de données avec une carte de jeu de données, - Un *Space* qui permet aux autres d'interagir avec votre modèle. ### Pousser les poids du modèle et la carte du modèle vers le Hub **Si vous utilisez le script d'entraînement (`train_controlnet_flax.py`) fourni dans ce répertoire** L'activation de l'argument `push_to_hub` dans les arguments d'entraînement va : - Créer un dépôt de modèles localement et à distance sur le Hub, - Créer une carte de modèle et l'écrire dans le dépôt de modèles local, - Sauvegarder votre modèle dans le référentiel de modèles local, - Pousser le dépôt local vers le Hub. 
Votre carte de modèle générée automatiquement ressemblera à ceci : ![Carte de modèle](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/jax_model_card.png). Vous pouvez modifier la carte de modèle pour qu'elle soit plus informative. Les cartes de modèle qui sont plus informatives que les autres auront plus de poids lors de l'évaluation. **Si vous avez entraîné un modèle personnalisé et que vous n'avez pas utilisé le script** Vous devez vous authentifier avec `huggingface-cli login` comme indiqué ci-dessus. Si vous utilisez une des classes de modèles disponibles dans `diffusers`, sauvegardez votre modèle avec la méthode `save_pretrained` de votre modèle. ```py model.save_pretrained("path_to_your_model_repository") ``` Après avoir sauvegardé votre modèle dans un dossier, vous pouvez simplement utiliser le script ci-dessous pour pousser votre modèle vers le Hub : ```py from huggingface_hub import create_repo, upload_folder create_repo("username/my-awesome-model") upload_folder( folder_path="path_to_your_model_repository", repo_id="username/my-awesome-model" ) ``` Ceci poussera votre modèle vers Hub. Après avoir poussé cela, vous devez créer la carte de modèle vous-même. Vous pouvez utiliser l'interface graphique pour l'éditer. ![Edit Model Card](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/edit_model_card.png) Chaque carte de modèle se compose de deux sections, les métadonnées et le texte libre. Vous pouvez éditer les métadonnées à partir des sections dans l'interface graphique. Si vous avez sauvegardé votre modèle en utilisant `save_pretrained`, vous n'avez pas besoin de fournir `pipeline_tag` et `library_name`. Sinon, fournissez `pipeline_tag`, `library_name` et le jeu de données s'il existe sur Hugging Face Hub. En plus de cela, vous devez ajouter `jax-diffusers-event` à la section `tags`. ``` --- license: apache-2.0 library_name: diffusers tags: - jax-diffusers-event datasets: - red_caps pipeline_tag: text-to-image --- ``` ![Edit Metadata](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/edit_metadata.png) ### Créer notre *Space* <h4> Rédiger notre application </h4> Nous utiliserons [Gradio] (https://gradio.app/) pour créer nos applications. Gradio possède deux API principales : `Interface` et `Blocks`. `Interface` est une API de haut niveau qui vous permet de créer une interface avec quelques lignes de code, et `Blocks` est une API de plus bas niveau qui vous donne plus de flexibilité sur les interfaces que vous pouvez construire. Le code doit être inclus dans un fichier appelé `app.py`. Essayons de créer une application ControlNet comme exemple. L'API `Interface` fonctionne simplement comme suit : ```py import gradio as gr # La fonction d'inférence prend en compte le prompt, le prompt négatif et l'image def infer(prompt, negative_prompt, image): # implémentez votre fonction d'inférence ici return output_image # vous devez passer les entrées et les sorties en fonction de la fonction d'inférence gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image").launch() ``` Vous pouvez personnaliser votre interface en passant `title`, `description` et `examples` à la fonction `Interface`. ```py title = "ControlNet on Canny Filter" description = "This is a demo on ControlNet based on canny filter." 
# vous devez passer vos exemples en fonction de vos entrées # chaque liste intérieure est un exemple, chaque élément de la liste correspondant à un composant des `inputs`. examples = [["a cat with cake texture", "low quality", "cat_image.png"]] gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image", title = title, description = description, examples = examples, theme='gradio/soft').launch() ``` Votre interface ressemblera à ceci : ![ControlNet](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio_controlnet.png) Avec les blocs, vous pouvez ajouter des marques, des onglets, des composants sous les colonnes et les lignes, etc. Supposons que nous ayons deux ControlNets et que nous voulions les inclure dans un *Space*. Nous les placerons sous différents onglets dans une démo comme ci-dessous : ```py import gradio as gr def infer_segmentation(prompt, negative_prompt, image): # votre fonction d'inférence pour le contrôle de la segmentation return im def infer_canny(prompt, negative_prompt, image): # votre fonction d'inférence pour un contrôle efficace return im with gr.Blocks(theme='gradio/soft') as demo: gr.Markdown("## Stable Diffusion with Different Controls") gr.Markdown("In this app, you can find different ControlNets with different filters. ") with gr.Tab("ControlNet on Canny Filter "): prompt_input_canny = gr.Textbox(label="Prompt") negative_prompt_canny = gr.Textbox(label="Negative Prompt") canny_input = gr.Image(label="Input Image") canny_output = gr.Image(label="Output Image") submit_btn = gr.Button(value = "Submit") canny_inputs = [prompt_input_canny, negative_prompt_canny, canny_input] submit_btn.click(fn=infer_canny, inputs=canny_inputs, outputs=[canny_output]) with gr.Tab("ControlNet with Semantic Segmentation"): prompt_input_seg = gr.Textbox(label="Prompt") negative_prompt_seg = gr.Textbox(label="Negative Prompt") seg_input = gr.Image(label="Image") seg_output = gr.Image(label="Output Image") submit_btn = gr.Button(value = "Submit") seg_inputs = [prompt_input_seg, negative_prompt_seg, seg_input] submit_btn.click(fn=infer_segmentation, inputs=seg_inputs, outputs=[seg_output]) demo.launch() ``` La démo ci-dessus ressemblera à ce qui suit : ![Gradio Blocks](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio_controlnet_blocks.png) #### Créer notre *Space* Une fois notre application écrite, nous pouvons créer un espace Hugging Face pour héberger notre application. Vous pouvez aller sur [huggingface.co](http://huggingface.co), cliquer sur votre profil en haut à droite et sélectionner "*New Space*". ![New Space](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_space.png) Nous pouvons nommer notre *Space*, choisir une licence et sélectionner "Gradio" comme Space SDK. ![Space Configuration](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/space_config.png) Après avoir créé le *Space*, vous pouvez soit utiliser les instructions ci-dessous pour cloner le dépôt localement, ajouter vos fichiers et pousser, soit utiliser l'interface graphique pour créer les fichiers et écrire le code dans le navigateur. ![Spaces Landing](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/repository_landing.png) Pour télécharger votre fichier de candidature, cliquez sur "*Add File*" et faites glisser votre fichier. 
![New Space Landing](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/add_file.png) Enfin, nous devons créer un fichier appelé `requirements.txt` et ajouter les conditions requises pour notre projet. Assurez-vous d'installer les versions de jax, diffusers et autres dépendances comme ci-dessous. ```py -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html jax[cuda11_cudnn805] jaxlib git+https://github.com/huggingface/diffusers@main opencv-python transformers flax ``` Nous vous accorderons une dotation GPU afin que votre application puisse fonctionner sur GPU. Nous avons un classement hébergé [ici] (https://huggingface.co/spaces/jax-diffusers-event/leaderboard) et nous distribuerons des prix à partir de ce classement. Pour que votre *Space* apparaisse sur le leaderboard, éditez simplement `README.md` de votre *Space* pour avoir le tag `jax-diffusers-event` sous les tags comme ci-dessous : ```py --- title: Canny Coyo1m emoji: 💜 ...py tags: - jax-diffusers-event --- ``` ## Prix Pour ce sprint, nous aurons de nombreux prix. Nous choisirons les dix premiers projets de [ce classement](https://huggingface.co/spaces/jax-diffusers-event/leaderboard), vous devez donc tagger votre *Space* pour le classement afin que votre soumission soit complète, comme indiqué dans la section ci-dessus. Les projets sont classés en fonction du nombre de j'aimes, nous augmenterons donc partagerons vos Spaces pour en augmenter la visibilité pour que les gens puissent voter en laissant un j'aime sur votre Space. Nous sélectionnerons les dix premiers projets du classement et le jury votera pour déterminer les trois premières places. Ces projets seront mis en valeur par Google et Hugging Face. Les interfaces élaborées ainsi que les projets dont les bases de code et les modèles sont en libre accès augmenteront probablement les chances de gagner des prix. Les prix sont les suivants et sont remis à chaque membre de l'équipe : **Première place** : Un bon d'achat de 150 $ à dépenser sur le [*Hugging Face Store*](https://store.huggingface.co/), un abonnement d'un an à Hugging Face Hub PRO, le livre *Natural Language Processing with Transformers*. **Deuxième place** : Un bon d'achat de 125$ à dépenser sur le [*Hugging Face Store*](https://store.huggingface.co/), un abonnement d'un an à Hugging Face Hub PRO. **Troisième place** : Un bon d'achat de 100 $ à dépenser sur le [*Hugging Face Store*](https://store.huggingface.co/), un abonnement d'un an à Hugging Face Hub PRO. Les dix premiers projets du classement (indépendamment de la décision du jury) gagneront un kit de merch exclusivement conçu pour ce sprint par Hugging Face, ainsi qu'un kit de merch séparé JAX de Google. ## Jury Le jury de ce sprint était composé des personnes suivantes : 1. Robin Rombach, Stability AI 2. Huiwen Chang, Google Research 3. Jun-Yan Zhu, Carnegie Mellon University 4. Merve Noyan, Hugging Face ## FAQ Dans cette section, nous rassemblons les réponses aux questions fréquemment posées sur notre canal discord. ### Comment utiliser VSCode avec TPU VM ? Vous pouvez suivre ce [guide général](https://medium.com/@ivanzhd/vscode-sftp-connection-to-compute-engine-on-google-cloud-platform-gcloud-9312797d56eb) sur la façon d'utiliser VSCode remote pour se connecter à Google Cloud VMs. Une fois que c'est configuré, vous pouvez développer sur la VM TPU en utilisant VSCode. 
Pour obtenir votre IP externe, utilisez cette commande : ```py gcloud compute tpus tpu-vm describe <node_name> --zone=<zone> ``` Elle devrait être listée sous 'accessConfig' -> 'externalIp' ### Comment tester votre code localement ? Puisque les membres de l'équipe partagent la VM TPU, il peut être pratique d'écrire et de tester votre code localement sur une unité centrale pendant que vos coéquipiers exécutent le processus d'entraînement sur la VM. Pour effectuer des tests locaux, il est important de mettre le drapeau `xla_force_host_platform_device_count` à `4`. Pour en savoir plus, consultez la [documentation] (https://jax.readthedocs.io/en/latest/jax-101/06-parallelism.html#aside-hosts-and-devices-in-jax). ## Gagnants du sprint Les 10 meilleurs projets (basés sur le nombre de likes sur leurs démos) sont disponibles sur ce [classement](https://huggingface.co/spaces/jax-diffusers-event/leaderboard). Nous avons soumis ce classement à notre jury pour qu'il juge les 10 meilleurs projets sur la base de plusieurs facteurs tels que les points de contrôle du modèle, les jeux de données et les bases de code open-source, l'exhaustivité du modèle et des cartes de jeux de données, etc. En conséquence, les trois projets suivants sont sortis vainqueurs : 1. [ControlNet pour la décoration intérieure](https://huggingface.co/spaces/controlnet-interior-design/controlnet-seg) 2. [ControlNet pour le réglage de la luminosité](https://huggingface.co/spaces/ioclab/brightness-controlnet) 3. [Stable Diffusion avec contrôle manuel](https://huggingface.co/spaces/vllab/controlnet-hands)
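En complément de la FAQ ci-dessus sur les tests locaux, voici une esquisse minimale (hypothétique, elle ne fait pas partie du guide original) montrant comment définir le drapeau `xla_force_host_platform_device_count` pour que JAX simule plusieurs périphériques sur une machine CPU :

```python
# Esquisse minimale : émuler 4 "périphériques" sur CPU pour tester le parallélisme localement.
# Le drapeau doit être défini avant l'import de JAX ; la valeur 4 reprend celle de la FAQ.
import os

os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=4"

import jax

print(jax.device_count())  # devrait afficher 4 périphériques hôtes (CPU)
```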
<jupyter_start><jupyter_text>Traduction (TensorFlow) Installez les bibliothèques 🤗 *Datasets* et 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !apt install git-lfs<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting datasets Downloading datasets-2.6.1-py3-none-any.whl (441 kB)  |████████████████████████████████| 441 kB 5.2 MB/s [?25hCollecting transformers[sentencepiece] Downloading transformers-4.23.1-py3-none-any.whl (5.3 MB)  |████████████████████████████████| 5.3 MB 38.1 MB/s [?25hRequirement already satisfied: dill<0.3.6 in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.5.1) Collecting responses<0.19 Downloading responses-0.18.0-py3-none-any.whl (38 kB) Requirement already satisfied: pyarrow>=6.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (6.0.1) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from datasets) (1.21.6) Requirement already satisfied: fsspec[http]>=2021.11.1 in /usr/local/lib/python3.7/dist-packages (from datasets) (2022.8.2) Requirement already satisfied: aiohttp in /usr/local/lib[...]<jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from datasets import load_dataset, load_metric raw_datasets = load_dataset("kde4", lang1="en", lang2="fr") raw_datasets split_datasets = raw_datasets["train"].train_test_split(train_size=0.9, seed=20) split_datasets split_datasets["validation"] = split_datasets.pop("test") split_datasets["train"][1]["translation"] from transformers import pipeline model_checkpoint = "Helsinki-NLP/opus-mt-en-fr" translator = pipeline("translation", model=model_checkpoint) translator("Default to expanded threads") split_datasets["train"][172]["translation"] translator( "Unable to import %1 using the OFX importer plugin. This file is not the correct format." 
) from transformers import AutoTokenizer model_checkpoint = "Helsinki-NLP/opus-mt-en-fr" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="tf") en_sentence = split_datasets["train"][1]["translation"]["en"] fr_sentence = split_datasets["train"][1]["translation"]["fr"] inputs = tokenizer(en_sentence) with tokenizer.as_target_tokenizer(): targets = tokenizer(fr_sentence) wrong_targets = tokenizer(fr_sentence) print(tokenizer.convert_ids_to_tokens(wrong_targets["input_ids"])) print(tokenizer.convert_ids_to_tokens(targets["input_ids"])) max_input_length = 128 max_target_length = 128 def preprocess_function(examples): inputs = [ex["en"] for ex in examples["translation"]] targets = [ex["fr"] for ex in examples["translation"]] model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True) # Configurer le tokenizer pour les cibles with tokenizer.as_target_tokenizer(): labels = tokenizer(targets, max_length=max_target_length, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs tokenized_datasets = split_datasets.map( preprocess_function, batched=True, remove_columns=split_datasets["train"].column_names, ) from transformers import TFAutoModelForSeq2SeqLM model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint, from_pt=True) from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf") batch = data_collator([tokenized_datasets["train"][i] for i in range(1, 3)]) batch.keys() batch["labels"] batch["decoder_input_ids"] for i in range(1, 3): print(tokenized_datasets["train"][i]["labels"]) tf_train_dataset = model.prepare_tf_dataset( tokenized_datasets["train"], collate_fn=data_collator, shuffle=True, batch_size=32, ) tf_eval_dataset = model.prepare_tf_dataset( tokenized_datasets["validation"], collate_fn=data_collator, shuffle=False, batch_size=16, ) !pip install sacrebleu from datasets import load_metric metric = load_metric("sacrebleu") predictions = [ "This plugin lets you translate web pages between several languages automatically." ] references = [ [ "This plugin allows you to automatically translate web pages between several languages." ] ] metric.compute(predictions=predictions, references=references) predictions = ["This This This This"] references = [ [ "This plugin allows you to automatically translate web pages between several languages." ] ] metric.compute(predictions=predictions, references=references) predictions = ["This plugin"] references = [ [ "This plugin allows you to automatically translate web pages between several languages." 
] ] metric.compute(predictions=predictions, references=references) import numpy as np import tensorflow as tf from tqdm import tqdm generation_data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, return_tensors="tf", pad_to_multiple_of=128 ) tf_generate_dataset = model.prepare_tf_dataset( tokenized_datasets["validation"], collate_fn=generation_data_collator, shuffle=False, batch_size=8, ) @tf.function(jit_compile=True) def generate_with_xla(batch): return model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=128, ) def compute_metrics(): all_preds = [] all_labels = [] for batch, labels in tqdm(tf_generate_dataset): predictions = generate_with_xla(batch) for batch in tf_generate_dataset: predictions = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"] ) labels = labels.numpy() labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds = [pred.strip() for pred in decoded_preds] decoded_labels = [[label.strip()] for label in decoded_labels] all_preds.extend(decoded_preds) all_labels.extend(decoded_labels) result = metric.compute(predictions=all_preds, references=all_labels) return {"bleu": result["score"]} from huggingface_hub import notebook_login notebook_login() print(compute_metrics()) from transformers import create_optimizer from transformers.keras_callbacks import PushToHubCallback import tensorflow as tf # Le nombre d'étapes d'entraînement est le nombre d'échantillons dans le jeu de données, divisé par la taille du batch puis multiplié # par le nombre total d'époques. Notez que le jeu de données tf_train_dataset est ici un lot de données tf.data.Dataset, # pas le jeu de données original Hugging Face, donc son len() est déjà num_samples // batch_size. num_epochs = 3 num_train_steps = len(tf_train_dataset) * num_epochs optimizer, schedule = create_optimizer( init_lr=5e-5, num_warmup_steps=0, num_train_steps=num_train_steps, weight_decay_rate=0.01, ) model.compile(optimizer=optimizer) # Entraîner en mixed-precision float16 tf.keras.mixed_precision.set_global_policy("mixed_float16") from transformers.keras_callbacks import PushToHubCallback callback = PushToHubCallback( output_dir="marian-finetuned-kde4-en-to-fr", tokenizer=tokenizer ) model.fit( tf_train_dataset, validation_data=tf_eval_dataset, callbacks=[callback], epochs=num_epochs, ) print(compute_metrics()) from transformers import pipeline # Remplacer par votre propre checkpoint model_checkpoint = "huggingface-course/marian-finetuned-kde4-en-to-fr" translator = pipeline("translation", model=model_checkpoint) translator("Default to expanded threads") translator( "Unable to import %1 using the OFX importer plugin. This file is not the correct format." )<jupyter_output><empty_output>
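<jupyter_text>La cellule `compute_metrics` plus haut utilise `decoded_preds` avant de le définir et contient une boucle de génération dupliquée ; une version corrigée (simple esquisse réutilisant les objets `tf_generate_dataset`, `generate_with_xla`, `tokenizer` et `metric` déjà définis) pourrait ressembler à ceci :<jupyter_code># Esquisse corrigée : décoder les prédictions avant de les nettoyer, sans boucle dupliquée
import numpy as np
from tqdm import tqdm


def compute_metrics():
    all_preds = []
    all_labels = []
    for batch, labels in tqdm(tf_generate_dataset):
        predictions = generate_with_xla(batch)
        decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
        labels = labels.numpy()
        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        all_preds.extend(pred.strip() for pred in decoded_preds)
        all_labels.extend([[label.strip()] for label in decoded_labels])
    result = metric.compute(predictions=all_preds, references=all_labels)
    return {"bleu": result["score"]}<jupyter_output><empty_output>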
<jupyter_start><jupyter_text>Integrations avec le *Hub* d'Hugging Face Installez les bibliothèques 🤗 Transformers et 🤗 Gradio pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install gradio import gradio as gr title = "GPT-J-6B (Boris)" description = "Démo Gradio pour GPT-J 6B (Boris), un transformer entraîné à l'aide du Mesh Transformer JAX de Ben Wang. GPT-J' fait référence à la classe du modèle, tandis que '6B' représente le nombre de paramètres entraînables. Pour l'utiliser, il suffit d'ajouter votre texte, ou de cliquer sur l'un des exemples pour le charger. Pour en savoir plus, consultez les liens ci-dessous." article = "<p style='text-align: center'><a href='https://github.com/kingoflolz/mesh-transformer-jax' target='_blank'>GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model</a></p>" examples = [ ["La tour mesure 324 mètres (1 063 pieds) de haut,"], ["L'orbite de la Lune autour de la Terre a"], ["Le bassin lisse de Borealis dans l'hémisphère nord couvre 40 %"], ] gr.Interface.load( # "huggingface/Cedille/fr-boris", # C'est un très gros modèle, le temps d'éxécution peut être très long (voire planter) ! "huggingface/asi/gpt-fr-cased-small", # Alternative beaucoup plus légère inputs=gr.Textbox(lines=5, label="Texte d'entrée"), title=title, description=description, article=article, examples=examples, enable_queue=True, ).launch() gr.Interface.load("spaces/abidlabs/remove-bg").launch() gr.Interface.load( "spaces/abidlabs/remove-bg", inputs="webcam", title="Supprimez l'arrière-plan de votre webcam !" ).launch()<jupyter_output><empty_output>
<jupyter_start><jupyter_text>**The Stable Diffusion Guide** 🎨 *...using `🧨 diffusers`* **Intro**Stable Diffusion is a [Latent Diffusion model](https://github.com/CompVis/latent-diffusion) developed by researchers from the Machine Vision and Learning group at LMU Munich, *a.k.a* CompVis.Model checkpoints were publicly released at the end of August by a collaboration of Stability AI, CompVis, and Runway with support from EleutherAI and LAION. For more information, you can check out [the official blog post](https://stability.ai/blog/stable-diffusion-public-release).Since its public release the community has done an incredible job at working together to make the stable diffusion checkpoints **faster**, **more memory efficient**, and **more performant**.[🧨 Diffusers](https://github.com/huggingface/diffusers) offers a simple API to run stable diffusion with all memory, compute and quality improvements.This notebook walks you through the improvements one-by-one and you can best leverage *Stable Diffusion* for **inference**. SetupFirst, please make sure you are using a GPU runtime to run this notebook, so inference is much faster. If the following command fails, use the `Runtime` menu above and select `Change runtime type`.<jupyter_code>!nvidia-smi<jupyter_output>Thu Jan 5 10:04:48 2023 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 72C P0 30W / 70W | 9870MiB / 15109MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-------[...]<jupyter_text>Next, you should install `diffusers` as well `scipy`, `ftfy` and `transformers`.<jupyter_code>!pip install diffusers[torch]==0.11.1 transformers scipy ftfy accelerate<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting diffusers[torch] Downloading diffusers-0.11.1-py3-none-any.whl (524 kB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 524.9/524.9 KB 9.6 MB/s eta 0:00:00 [?25hCollecting transformers Downloading transformers-4.25.1-py3-none-any.whl (5.8 MB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.8/5.8 MB 71.7 MB/s eta 0:00:00 [?25hRequirement already satisfied: scipy in /usr/local/lib/python3.8/dist-packages (1.7.3) Collecting ftfy Downloading ftfy-6.1.1-py3-none-any.whl (53 kB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 53.1/53.1 KB 7.7 MB/s eta 0:00:00 [?25hCollecting accelerate Downloading accelerate-0.15.0-py3-none-any.whl (191 kB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 191.5/191.5 KB 774.2 kB/s eta 0:00:00 [...]<jupyter_text>Prompt Engineering 🎨When running *Stable Diffusion* in inference, we usually want to generate a certain type, style of image and then improve upon it. Improving upon a previously generated image means running inference over and over again with a different prompt and potentially a different seed until we are happy with our generation. So to begin with, it is most important to speed up stable diffusion as much as possible to generate as many pictures as possible in a given amount of time. 
This can be done by both improving the **computational efficiency** (speed) and the **memory efficiency** (GPU RAM). Let's start by looking into computational efficiency first. Throughout the notebook, we will focus on [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5):<jupyter_code>model_id = "runwayml/stable-diffusion-v1-5"<jupyter_output><empty_output><jupyter_text>Let's load the pipeline as defined on https://huggingface.co/runwayml/stable-diffusion-v1-5 . Speed Optimization<jupyter_code>from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(model_id)<jupyter_output><empty_output><jupyter_text>We aim to generate a beautiful photograph of an *old warrior chief* and will later try to find the best prompt to generate such a photograph. For now, let's keep the prompt simple:<jupyter_code>prompt = "portrait photo of a old warrior chief"<jupyter_output><empty_output><jupyter_text>To begin with, we should make sure we run inference on a GPU, so let's move the pipeline to the GPU, just like you would with any PyTorch module.<jupyter_code>pipe = pipe.to("cuda")<jupyter_output><empty_output><jupyter_text>To generate an image, you should use the `__call__` method. You can have a look at its docstring to better understand what arguments can be passed [here](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/text2imgdiffusers.StableDiffusionPipeline.__call__). To make sure we can reproduce more or less the same image in every call, let's make use of the generator. See the documentation on reproducibility [here]( ) for more information.<jupyter_code>import torch

generator = torch.Generator("cuda").manual_seed(0)<jupyter_output><empty_output><jupyter_text>Now, let's take it for a spin.<jupyter_code>image = pipe(prompt, generator=generator).images[0]
image<jupyter_output><empty_output><jupyter_text>Cool, this took roughly 30 seconds on a T4 GPU (you might see faster inference if your allocated GPU is better than a T4). The default run above used full float32 precision and ran the default 50 inference steps. The easiest speed-ups come from switching to float16 (or half) precision and simply running fewer inference steps. Let's now load the model in float16 instead.<jupyter_code>pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")<jupyter_output><empty_output><jupyter_text>And we can again call the pipeline to generate an image.<jupyter_code>generator = torch.Generator("cuda").manual_seed(0)

image = pipe(prompt, generator=generator).images[0]
image<jupyter_output><empty_output><jupyter_text>Cool, this is almost three times as fast for arguably the exact same image quality. We strongly suggest always running your pipelines in float16, as we have very rarely seen quality degrade because of it. Next, let's see if we really need to use 50 inference steps or whether we could get away with significantly fewer. Let's have a look at all the schedulers the Stable Diffusion pipeline is compatible with.<jupyter_code>pipe.scheduler.compatibles<jupyter_output><empty_output><jupyter_text>Cool, that's a lot of schedulers. 🧨 Diffusers is constantly adding novel schedulers/samplers that can be used with Stable Diffusion.
For more information, we recommend taking a look at the official documentation [here](https://huggingface.co/docs/diffusers/main/en/api/schedulers/overview). Alright, right now Stable Diffusion is using the `PNDMScheduler`, which usually requires around 50 inference steps. However, other schedulers such as `DPMSolverMultistepScheduler` or `DPMSolverSinglestepScheduler` seem to get away with just 20 to 25 inference steps. Let's try them out. You can set a new scheduler by making use of the [from_config](https://huggingface.co/docs/diffusers/main/en/api/configurationdiffusers.ConfigMixin.from_config) function.<jupyter_code>from diffusers import DPMSolverMultistepScheduler

pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)<jupyter_output><empty_output><jupyter_text>Now, let's try to reduce the number of inference steps to just 20.<jupyter_code>generator = torch.Generator("cuda").manual_seed(0)

image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
image<jupyter_output><empty_output><jupyter_text>The image now does look a little different, but it's arguably still of equally high quality. We have now cut inference time down to just 4 seconds 😍. Memory Optimization More memory indirectly also means more speed, since we're often trying to maximize how many images we can generate per second: the more images per batch, the more images per second. The easiest way to see how large a batch we can fit in memory is to simply try it out and see when we get an *"Out-of-memory (OOM)"* error. One can run batched inference by simply passing a list of prompts and generators. Let's define a quick function that generates a batch for us.<jupyter_code>def get_inputs(batch_size=1):
    generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
    prompts = batch_size * [prompt]
    num_inference_steps = 20

    return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}<jupyter_output><empty_output><jupyter_text>We also need a method that allows us to easily display a batch of images.<jupyter_code>from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid<jupyter_output><empty_output><jupyter_text>Cool, let's see how much memory we can use, starting with batch_size=4.<jupyter_code>images = pipe(**get_inputs(batch_size=4)).images
image_grid(images)<jupyter_output><empty_output><jupyter_text>Going over a batch_size of 4 will error out in this Colab. Also, we can see we only generate slightly more images per second (3.75s/image) compared to 4s/image previously. However, the community has found some nice tricks to relax the memory constraints further${}^1$. By far most of the memory is taken up by the cross-attention layers. Instead of running this operation in batch, one can run it sequentially to save a significant amount of memory. It can easily be enabled by calling `enable_attention_slicing`, as documented [here](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/text2imgdiffusers.StableDiffusionPipeline.enable_attention_slicing).---${}^1$ After Stable Diffusion was released, the community found improvements within days and shared them freely over GitHub - open-source at its finest! I believe the original idea came from [this](https://github.com/basujindal/stable-diffusion/pull/117)
GitHub thread.<jupyter_code>pipe.enable_attention_slicing()<jupyter_output><empty_output><jupyter_text>Great now that attention slicing is enabled, let's try to double the batch size again, going for `batch_size=8`.<jupyter_code>images = pipe(**get_inputs(batch_size=8)).images image_grid(images, rows=2, cols=4)<jupyter_output><empty_output><jupyter_text>Nice, it works. However, the speed gain is again not very big (it might however be much more significant on other GPUs).We're at roughly 3.5 seconds per image 🔥 which is probably the fastest we can be with a simple T4 without sacrificing quality.Next, let's look into how to improve the quality! Quality ImprovementsNow that our image generation pipeline is blazing fast, let's try to get maximum image quality.First of all, image quality is extremely subjective, so it's difficult to make general claims here.The most obvious step to take to improve quality is to use *better checkpoints*. Since the release of Stable Diffusion many improved versions have been released, which are summarized here:- *Official Release - 22 Aug 2022*: [Stable-Diffusion 1.4](https://huggingface.co/CompVis/stable-diffusion-v1-4)- *20 October 2022*: [Stable-Diffusion 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5)- *24 Nov 2022*: [Stable-Diffusion 2.0](https://huggingface.co/stabilityai/stable-diffusion-2-0)- *7 Dec 2022*: [Stable-Diffusion 2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1)Newer versions don't necessarily mean better image quality with the same parameters. People mentioned that *2.0* is slighly worse than *1.5* for certain prompts, but given the right prompt engineering *2.0* and *2.1* seem to be better. Overall, we strongly recommend to just try the models out and read up on advice online $^{2}$Additionally, the community has started fine-tuning many of the above versions on certain styles with some of them having an extremely high quality and gaining a lot of traction. We recommend to simply have a look at all [diffusers checkpoints sorted by downloads and try out the different checkpoints](https://huggingface.co/models?library=diffusers).For the following, we will stick to v1.5 for simplicity.---$^{2}$ E.g. it has been shown that using negative prompts is very important for 2.0 and 2.1 to get the highest possible quality. See for example [this nice blog post](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/). Next, we can also try to optimize single components of the pipeline, e.g. switching out the latent decoder. For more details on how the whole Stable Diffusion pipeline works, please have a look at [this blog post](https://huggingface.co/blog/stable_diffusion).Let's load [stabilityai's newest auto-decoder](https://huggingface.co/stabilityai/stable-diffusion-2-1).<jupyter_code>from diffusers import AutoencoderKL vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")<jupyter_output><empty_output><jupyter_text>Now we can set it to the vae of the pipeline to use it.<jupyter_code>pipe.vae = vae<jupyter_output><empty_output><jupyter_text>Let's run the same prompt as before to compare quality.<jupyter_code>images = pipe(**get_inputs(batch_size=8)).images image_grid(images, rows=2, cols=4)<jupyter_output><empty_output><jupyter_text>Seems like the difference is only very minor, but the new generations are arguably a bit *sharper*. 
Cool, finally, let's look a bit into prompt engineering.Our goal was to generate a photo of an old warrior chief. Let's now try to bring a bit more color into the photos and make the look more impressive.Originally our prompt was "*portrait photo of a old warrior chief*".To improve the prompt, it often helps to add cues that could have been used online to save high quality photos, as well as adding more details since.Essentially, when doing prompt engineering, one has to think:- How was the photo or similar photos of the one I want probably stored on the internet?- What additional detail can I give that steers the models into the style that I want.Cool, let's add more details.<jupyter_code>prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"<jupyter_output><empty_output><jupyter_text>and let's also add some cues that usually help to generate higher quality images.<jupyter_code>prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta" prompt<jupyter_output><empty_output><jupyter_text>Cool, let's now try this prompt.<jupyter_code>images = pipe(**get_inputs(batch_size=8)).images image_grid(images, rows=2, cols=4)<jupyter_output><empty_output><jupyter_text>Pretty impressive! We got definitely some very high quality image generations there. The 2nd image is my personal favorite, so I'll re-use this seed and see whether I can tweak the prompts slighly by using "oldest warrior", "old", "", and "young" instead of "old".<jupyter_code>prompts = [ "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", ] generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))] # 1 because we want the 2nd image images = pipe(prompt=prompts, generator=generator, num_inference_steps=25).images image_grid(images)<jupyter_output><empty_output>
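<jupyter_text>As a small, hedged addition that is not part of the original guide: the pipeline call also accepts a `negative_prompt` argument, which is another easy quality lever (and, as noted above, particularly important for the 2.x checkpoints). A minimal sketch reusing the `pipe` and `prompt` defined earlier, with illustrative negative-prompt wording:<jupyter_code># Hedged sketch: steer generations away from common failure modes with a negative prompt.
generator = torch.Generator("cuda").manual_seed(1)

image = pipe(
    prompt,
    negative_prompt="lowres, bad anatomy, worst quality, low quality",  # illustrative wording
    generator=generator,
    num_inference_steps=25,
).images[0]
image<jupyter_output><empty_output>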
""" On one node, launch with `deepspeed --num_gpus N idefics_zero3_finetuning.py` by replacing N with the number of your GPUs For several nodes, using Slurm, a template script is provided at `examples/idefics/idefics_zero3_finetuning/slurm_script_idefics_zero3_finetuning_multinode.slurm` For more information, follow the tutorial on using DeepSpeed with Transformers at https://huggingface.co/docs/transformers/main_classes/deepspeed """ import torch import torchvision.transforms as transforms from datasets import load_dataset from PIL import Image from transformers import AutoProcessor, IdeficsForVisionText2Text, Trainer, TrainingArguments device = "cuda" if torch.cuda.is_available() else "cpu" checkpoint = "HuggingFaceM4/idefics-9b" processor = AutoProcessor.from_pretrained(checkpoint, use_auth_token=True) def convert_to_rgb(image): # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background # for transparent images. The call to `alpha_composite` handles this case if image.mode == "RGB": return image image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") return alpha_composite def ds_transforms(example_batch): image_size = processor.image_processor.image_size image_mean = processor.image_processor.image_mean image_std = processor.image_processor.image_std image_transform = transforms.Compose( [ convert_to_rgb, transforms.RandomResizedCrop( (image_size, image_size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize(mean=image_mean, std=image_std), ] ) prompts = [] for i in range(len(example_batch["caption"])): # We split the captions to avoid having very long examples, which would require more GPU ram during training caption = example_batch["caption"][i].split(".")[0] try: # There are a handful of images that are not hosted anymore. This is a small (dummy) hack to skip these processor.image_processor.fetch_images(example_batch["image_url"][i]) except Exception: print( "Warning: at least one image couldn't be retrieved from the internet in an example. Skipping the" " batch." ) prompts.append( [ example_batch["image_url"][i], f"Question: What's on the picture? Answer: This is {example_batch['name'][i]}. 
{caption}</s>", ], ) inputs = processor(prompts, transform=image_transform, return_tensors="pt").to(device) inputs["labels"] = inputs["input_ids"] return inputs # load and prepare dataset ds = load_dataset("TheFusion21/PokemonCards") ds = ds["train"].train_test_split(test_size=0.002) train_ds = ds["train"] eval_ds = ds["test"] train_ds.set_transform(ds_transforms) eval_ds.set_transform(ds_transforms) # Important, define the training_args before the model ds_config = { "communication_data_type": "fp32", "bf16": {"enabled": True}, "zero_optimization": { "stage": 3, "overlap_comm": False, "reduce_bucket_size": "auto", "contiguous_gradients": True, "stage3_gather_16bit_weights_on_model_save": False, "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 2e9, "stage3_max_reuse_distance": 2e9, "offload_optimizer": {"device": "none"}, "offload_param": {"device": "none"}, }, "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "steps_per_print": 2000000, } training_args = TrainingArguments( output_dir="idefics-pokemon", learning_rate=2e-4, bf16=True, per_device_train_batch_size=1, per_device_eval_batch_size=1, gradient_accumulation_steps=1, # gradient_checkpointing=True, # Uncomment if OOM dataloader_pin_memory=False, save_total_limit=3, evaluation_strategy="steps", save_strategy="steps", save_steps=40, eval_steps=20, logging_steps=20, max_steps=40, remove_unused_columns=False, push_to_hub=False, label_names=["labels"], load_best_model_at_end=True, report_to="none", optim="adamw_torch", deepspeed=ds_config, ) model = IdeficsForVisionText2Text.from_pretrained(checkpoint, torch_dtype=torch.bfloat16) trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds, ) result = trainer.train() print(result) # Prints one per process - mostly here for sanity check
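# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): once training has
# finished, the saved checkpoint could be reloaded in a separate, single-GPU
# process for a quick qualitative check. The checkpoint path and the example
# image URL below are illustrative assumptions, not values used above.
#
# finetuned = IdeficsForVisionText2Text.from_pretrained(
#     "idefics-pokemon/checkpoint-40", torch_dtype=torch.bfloat16
# ).to(device)
# prompts = [["https://images.pokemontcg.io/base1/4_hires.png", "Question: What's on the picture? Answer:"]]
# inputs = processor(prompts, return_tensors="pt").to(device)
# generated_ids = finetuned.generate(**inputs, max_new_tokens=64)
# print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
# ---------------------------------------------------------------------------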
<jupyter_start><jupyter_text>PatchTSMixer in HuggingFace - Getting Started `PatchTSMixer` is a lightweight time-series modeling approach based on the MLP-Mixer architecture. It is proposed in [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://huggingface.co/papers/2306.09364) by IBM Research authors Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong and Jayant Kalagnanam.For effective mindshare and to promote open-sourcing - IBM Research joins hands with the HuggingFace team to release this model in the Transformers library.In the [Hugging Face implementation](https://huggingface.co/docs/transformers/main/en/model_doc/patchtsmixer), we provide PatchTSMixer’s capabilities to effortlessly facilitate lightweight mixing across patches, channels, and hidden features for effective multivariate time-series modeling. It also supports various attention mechanisms starting from simple gated attention to more complex self-attention blocks that can be customized accordingly. The model can be pretrained and subsequently used for various downstream tasks such as forecasting, classification, and regression.`PatchTSMixer` outperforms state-of-the-art MLP and Transformer models in forecasting by a considerable margin of 8-60%. It also outperforms the latest strong benchmarks of Patch-Transformer models (by 1-2%) with a significant reduction in memory and runtime (2-3X). For more details, refer to the [paper](https://arxiv.org/pdf/2306.09364.pdf).In this blog, we will demonstrate examples of getting started with PatchTSMixer. We will first demonstrate the forecasting capability of `PatchTSMixer` on the Electricity dataset. We will then demonstrate the transfer learning capability of PatchTSMixer by using the model trained on Electricity to do zero-shot forecasting on the `ETTH2` dataset. InstallationThis demo requires Hugging Face [`Transformers`](https://github.com/huggingface/transformers) for the model and the IBM `tsfm` package for auxiliary data pre-processing.Both can be installed by following the steps below.1. Install IBM Time Series Foundation Model Repository [`tsfm`](https://github.com/ibm/tsfm).```pip install git+https://github.com:IBM/tsfm.git```2. Install Hugging Face [`Transformers`](https://github.com/huggingface/transformersinstallation)```pip install transformers```3. Test it with the following commands in a `python` terminal.```from transformers import PatchTSMixerConfigfrom tsfm_public.toolkit.dataset import ForecastDFDataset```<jupyter_code>!pip install git+https://github.com:IBM/tsfm.git transformers from transformers import PatchTSMixerConfig from tsfm_public.toolkit.dataset import ForecastDFDataset<jupyter_output><empty_output><jupyter_text>Part 1: Forecasting on Electricity dataset<jupyter_code>import os import random from transformers import ( EarlyStoppingCallback, PatchTSMixerConfig, PatchTSMixerForPrediction, Trainer, TrainingArguments, ) import numpy as np import pandas as pd import torch from tsfm_public.toolkit.dataset import ForecastDFDataset from tsfm_public.toolkit.time_series_preprocessor import TimeSeriesPreprocessor from tsfm_public.toolkit.util import select_by_index<jupyter_output>/dccstor/dnn_forecasting/conda_envs/envs/hf/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm 2023-12-11 01:25:50.313015: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered 2023-12-11 01:25:50.313102: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered 2023-12-11 01:25:50.313132: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered 2023-12-11 01:25:51.234452: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] [...]<jupyter_text>Set seed<jupyter_code>from transformers import set_seed set_seed(42)<jupyter_output><empty_output><jupyter_text>Load and prepare datasetsIn the next cell, please adjust the following parameters to suit your application:- `dataset_path`: path to local .csv file, or web address to a csv file for the data of interest. Data is loaded with pandas, so anything supported by`pd.read_csv` is supported: (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html).- `timestamp_column`: column name containing timestamp information, use `None` if there is no such column.- `id_columns`: List of column names specifying the IDs of different time series. If no ID column exists, use `[]`.- `forecast_columns`: List of columns to be modeled.- `context_length`: The amount of historical data used as input to the model. Windows of the input time series data with length equal to`context_length` will be extracted from the input dataframe. In the case of a multi-time series dataset, the context windows will be createdso that they are contained within a single time series (i.e., a single ID).- `forecast_horizon`: Number of timestamps to forecast in the future.- `train_start_index`, `train_end_index`: the start and end indices in the loaded data which delineate the training data.- `valid_start_index`, `valid_end_index`: the start and end indices in the loaded data which delineate the validation data.- `test_start_index`, `test_end_index`: the start and end indices in the loaded data which delineate the test data.- `num_workers`: Number of CPU workers in the PyTorch dataloader.- `batch_size`: Batch size.The data is first loaded into a Pandas dataframe and split into training, validation, and test parts. 
Then the Pandas dataframes are converted to the appropriate PyTorch dataset required for training.<jupyter_code>PRETRAIN_AGAIN = True # Download ECL data from https://github.com/zhouhaoyi/Informer2020 dataset_path = "~/Downloads/ECL.csv" timestamp_column = "date" id_columns = [] context_length = 512 forecast_horizon = 96 patch_length = 8 num_workers = 16 # Reduce this if you have low number of CPU cores batch_size = 64 # Adjust according to GPU memory if PRETRAIN_AGAIN: data = pd.read_csv( dataset_path, parse_dates=[timestamp_column], ) forecast_columns = list(data.columns[1:]) # get split num_train = int(len(data) * 0.7) num_test = int(len(data) * 0.2) num_valid = len(data) - num_train - num_test border1s = [ 0, num_train - context_length, len(data) - num_test - context_length, ] border2s = [num_train, num_train + num_valid, len(data)] train_start_index = border1s[0] # None indicates beginning of dataset train_end_index = border2s[0] # we shift the start of the evaluation period back by context length so that # the first evaluation timestamp is immediately following the training data valid_start_index = border1s[1] valid_end_index = border2s[1] test_start_index = border1s[2] test_end_index = border2s[2] train_data = select_by_index( data, id_columns=id_columns, start_index=train_start_index, end_index=train_end_index, ) valid_data = select_by_index( data, id_columns=id_columns, start_index=valid_start_index, end_index=valid_end_index, ) test_data = select_by_index( data, id_columns=id_columns, start_index=test_start_index, end_index=test_end_index, ) tsp = TimeSeriesPreprocessor( context_length=context_length, timestamp_column=timestamp_column, id_columns=id_columns, input_columns=forecast_columns, output_columns=forecast_columns, scaling=True, ) tsp.train(train_data) if PRETRAIN_AGAIN: train_dataset = ForecastDFDataset( tsp.preprocess(train_data), id_columns=id_columns, timestamp_column="date", input_columns=forecast_columns, output_columns=forecast_columns, context_length=context_length, prediction_length=forecast_horizon, ) valid_dataset = ForecastDFDataset( tsp.preprocess(valid_data), id_columns=id_columns, timestamp_column="date", input_columns=forecast_columns, output_columns=forecast_columns, context_length=context_length, prediction_length=forecast_horizon, ) test_dataset = ForecastDFDataset( tsp.preprocess(test_data), id_columns=id_columns, timestamp_column="date", input_columns=forecast_columns, output_columns=forecast_columns, context_length=context_length, prediction_length=forecast_horizon, )<jupyter_output><empty_output><jupyter_text>Configure the PatchTSMixer modelNext, we instantiate a randomly initialized PatchTSMixer model with a configuration. The settings below control the different hyperparameters related to the architecture. - `num_input_channels`: the number of input channels (or dimensions) in the time series data. This is automatically set to the number for forecast columns. - `context_length`: As described above, the amount of historical data used as input to the model. - `prediction_length`: This is same as the forecast horizon as described above. - `patch_length`: The patch length for the `PatchTSMixer` model. It is recommended to choose a value that evenly divides `context_length`. - `patch_stride`: The stride used when extracting patches from the context window. - `d_model`: Hidden feature dimension of the model. - `num_layers`: The number of model layers. - `dropout`: Dropout probability for all fully connected layers in the encoder. 
- `head_dropout`: Dropout probability used in the head of the model. - `mode`: PatchTSMixer operating mode. "common_channel"/"mix_channel". Common-channel works in channel-independent mode. For pretraining, use "common_channel". - `scaling`: Per-widow standard scaling. Recommended value: "std".For full details on the parameters, we refer to the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/patchtsmixertransformers.PatchTSMixerConfig).We recommend that you only adjust the values in the next cell.<jupyter_code>if PRETRAIN_AGAIN: config = PatchTSMixerConfig( context_length=context_length, prediction_length=forecast_horizon, patch_length=patch_length, num_input_channels=len(forecast_columns), patch_stride=patch_length, d_model=16, num_layers=8, expansion_factor=2, dropout=0.2, head_dropout=0.2, mode="common_channel", scaling="std", ) model = PatchTSMixerForPrediction(config)<jupyter_output><empty_output><jupyter_text>Train model Next, we can leverage the Hugging Face [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) class to train the model based on the direct forecasting strategy. We first define the [TrainingArguments](https://huggingface.co/docs/transformers/main_classes/trainertransformers.TrainingArguments) which lists various hyperparameters regarding training such as the number of epochs, learning rate, and so on.<jupyter_code>if PRETRAIN_AGAIN: training_args = TrainingArguments( output_dir="./checkpoint/patchtsmixer/electricity/pretrain/output/", overwrite_output_dir=True, learning_rate=0.001, num_train_epochs=100, # For a quick test of this notebook, set it to 1 do_eval=True, evaluation_strategy="epoch", per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, dataloader_num_workers=num_workers, report_to="tensorboard", save_strategy="epoch", logging_strategy="epoch", save_total_limit=3, logging_dir="./checkpoint/patchtsmixer/electricity/pretrain/logs/", # Make sure to specify a logging directory load_best_model_at_end=True, # Load the best model when training ends metric_for_best_model="eval_loss", # Metric to monitor for early stopping greater_is_better=False, # For loss label_names=["future_values"], # max_steps=20, ) # Create the early stopping callback early_stopping_callback = EarlyStoppingCallback( early_stopping_patience=10, # Number of epochs with no improvement after which to stop early_stopping_threshold=0.0001, # Minimum improvement required to consider as improvement ) # define trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=valid_dataset, callbacks=[early_stopping_callback], ) # pretrain trainer.train()<jupyter_output>/dccstor/dnn_forecasting/conda_envs/envs/hf/lib/python3.10/site-packages/torch/nn/parallel/_functions.py:68: UserWarning: Was asked to gather along dimension 0, but all input tensors were scalars; will instead unsqueeze and return a vector. warnings.warn('Was asked to gather along dimension 0, but all '<jupyter_text>Evaluate the model on the test set.**Note that the training and evaluation loss for PatchTSMixer is the Mean Squared Error (MSE) loss. 
Hence, we do not separately compute the MSE metric in any of the following evaluation experiments.**<jupyter_code>if PRETRAIN_AGAIN: results = trainer.evaluate(test_dataset) print("Test result:") print(results)<jupyter_output>/dccstor/dnn_forecasting/conda_envs/envs/hf/lib/python3.10/site-packages/torch/nn/parallel/_functions.py:68: UserWarning: Was asked to gather along dimension 0, but all input tensors were scalars; will instead unsqueeze and return a vector. warnings.warn('Was asked to gather along dimension 0, but all '<jupyter_text>We get MSE score of 0.128 which is the SOTA result on the Electricity data. Save model<jupyter_code>if PRETRAIN_AGAIN: save_dir = "patchtsmixer/electricity/model/pretrain/" os.makedirs(save_dir, exist_ok=True) trainer.save_model(save_dir)<jupyter_output><empty_output><jupyter_text>Part 2: Transfer Learning from Electricity to `ETTH2`In this section, we will demonstrate the transfer learning capability of the `PatchTSMixer` model.We use the model pre-trained on the Electricity dataset to do zero-shot forecasting on the `ETTH2` dataset.By Transfer Learning, we mean that we first pretrain the model for a forecasting task on a `source` dataset (which we did above on the `Electricity` dataset). Then, we will use the pretrained model for zero-shot forecasting on a `target` dataset. By zero-shot, we mean that we test the performance in the `target` domain without any additional training. We hope that the model gained enough knowledge from pretraining which can be transferred to a different dataset. Subsequently, we will do linear probing and (then) finetuning of the pretrained model on the `train` split of the target data, and will validate the forecasting performance on the `test` split of the target data. In this example, the source dataset is the Electricity dataset and the target dataset is `ETTH2`. Transfer Learning on `ETTh2` dataAll evaluations are on the `test` part of the `ETTh2` data:Step 1: Directly evaluate the electricity-pretrained model. This is the zero-shot performance. Step 2: Evaluate after doing linear probing. Step 3: Evaluate after doing full finetuning. Load `ETTh2` datasetBelow, we load the `ETTh2` dataset as a Pandas dataframe. Next, we create 3 splits for training, validation and testing. 
We then leverage the `TimeSeriesPreprocessor` class to prepare each split for the model.<jupyter_code>dataset = "ETTh2" print(f"Loading target dataset: {dataset}") dataset_path = f"https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/{dataset}.csv" timestamp_column = "date" id_columns = [] forecast_columns = ["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL", "OT"] train_start_index = None # None indicates beginning of dataset train_end_index = 12 * 30 * 24 # we shift the start of the evaluation period back by context length so that # the first evaluation timestamp is immediately following the training data valid_start_index = 12 * 30 * 24 - context_length valid_end_index = 12 * 30 * 24 + 4 * 30 * 24 test_start_index = 12 * 30 * 24 + 4 * 30 * 24 - context_length test_end_index = 12 * 30 * 24 + 8 * 30 * 24 data = pd.read_csv( dataset_path, parse_dates=[timestamp_column], ) train_data = select_by_index( data, id_columns=id_columns, start_index=train_start_index, end_index=train_end_index, ) valid_data = select_by_index( data, id_columns=id_columns, start_index=valid_start_index, end_index=valid_end_index, ) test_data = select_by_index( data, id_columns=id_columns, start_index=test_start_index, end_index=test_end_index, ) tsp = TimeSeriesPreprocessor( timestamp_column=timestamp_column, id_columns=id_columns, input_columns=forecast_columns, output_columns=forecast_columns, scaling=True, ) tsp.train(train_data) train_dataset = ForecastDFDataset( tsp.preprocess(train_data), id_columns=id_columns, input_columns=forecast_columns, output_columns=forecast_columns, context_length=context_length, prediction_length=forecast_horizon, ) valid_dataset = ForecastDFDataset( tsp.preprocess(valid_data), id_columns=id_columns, input_columns=forecast_columns, output_columns=forecast_columns, context_length=context_length, prediction_length=forecast_horizon, ) test_dataset = ForecastDFDataset( tsp.preprocess(test_data), id_columns=id_columns, input_columns=forecast_columns, output_columns=forecast_columns, context_length=context_length, prediction_length=forecast_horizon, )<jupyter_output><empty_output><jupyter_text>Zero-shot forecasting on `ETTh2`As we are going to test forecasting performance out-of-the-box, we load the model which we pretrained above.<jupyter_code>print("Loading pretrained model") finetune_forecast_model = PatchTSMixerForPrediction.from_pretrained( "patchtsmixer/electricity/model/pretrain/" ) print("Done") finetune_forecast_args = TrainingArguments( output_dir="./checkpoint/patchtsmixer/transfer/finetune/output/", overwrite_output_dir=True, learning_rate=0.0001, num_train_epochs=100, do_eval=True, evaluation_strategy="epoch", per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, dataloader_num_workers=num_workers, report_to="tensorboard", save_strategy="epoch", logging_strategy="epoch", save_total_limit=3, logging_dir="./checkpoint/patchtsmixer/transfer/finetune/logs/", # Make sure to specify a logging directory load_best_model_at_end=True, # Load the best model when training ends metric_for_best_model="eval_loss", # Metric to monitor for early stopping greater_is_better=False, # For loss ) # Create a new early stopping callback with faster convergence properties early_stopping_callback = EarlyStoppingCallback( early_stopping_patience=5, # Number of epochs with no improvement after which to stop early_stopping_threshold=0.001, # Minimum improvement required to consider as improvement ) finetune_forecast_trainer = Trainer( model=finetune_forecast_model, 
args=finetune_forecast_args, train_dataset=train_dataset, eval_dataset=valid_dataset, callbacks=[early_stopping_callback], ) print("\n\nDoing zero-shot forecasting on target data") result = finetune_forecast_trainer.evaluate(test_dataset) print("Target data zero-shot forecasting result:") print(result)<jupyter_output>Doing zero-shot forecasting on target data<jupyter_text>As can be seen, we get a zero-shot mean-squared error (MSE) of 0.3, which is close to the state-of-the-art result. Next, let's see how well we can do by performing linear probing, which involves training a linear classifier on top of a frozen pre-trained model. Linear probing is often done to test the performance of features of a pretrained model. Linear probing on `ETTh2` We can do a quick linear probing on the `train` part of the target data to see any possible `test` performance improvement.<jupyter_code># Freeze the backbone of the model for param in finetune_forecast_trainer.model.model.parameters(): param.requires_grad = False print("\n\nLinear probing on the target data") finetune_forecast_trainer.train() print("Evaluating") result = finetune_forecast_trainer.evaluate(test_dataset) print("Target data head/linear probing result:") print(result)<jupyter_output>Linear probing on the target data<jupyter_text>As can be seen, by training a simple linear layer on top of the frozen backbone, the MSE decreased from 0.3 to 0.271, achieving state-of-the-art results.<jupyter_code>save_dir = f"patchtsmixer/electricity/model/transfer/{dataset}/model/linear_probe/" os.makedirs(save_dir, exist_ok=True) finetune_forecast_trainer.save_model(save_dir) save_dir = f"patchtsmixer/electricity/model/transfer/{dataset}/preprocessor/" os.makedirs(save_dir, exist_ok=True) tsp.save_pretrained(save_dir)<jupyter_output><empty_output><jupyter_text>Finally, let's see if we get any more improvements by doing a full finetune of the model on the target dataset. Full finetuning on `ETTh2` We can do a full model finetune (instead of probing the last linear layer as shown above) on the `train` part of the target data to see a possible `test` performance improvement. The code looks similar to the linear probing task above, except that we are not freezing any parameters.<jupyter_code># Reload the model finetune_forecast_model = PatchTSMixerForPrediction.from_pretrained( "patchtsmixer/electricity/model/pretrain/" ) finetune_forecast_trainer = Trainer( model=finetune_forecast_model, args=finetune_forecast_args, train_dataset=train_dataset, eval_dataset=valid_dataset, callbacks=[early_stopping_callback], ) print("\n\nFinetuning on the target data") finetune_forecast_trainer.train() print("Evaluating") result = finetune_forecast_trainer.evaluate(test_dataset) print("Target data full finetune result:") print(result)<jupyter_output>Finetuning on the target data<jupyter_text>In this case, there is not much improvement by doing full finetuning. Let's save the model anyway.<jupyter_code>save_dir = f"patchtsmixer/electricity/model/transfer/{dataset}/model/fine_tuning/" os.makedirs(save_dir, exist_ok=True) finetune_forecast_trainer.save_model(save_dir)<jupyter_output><empty_output>
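<jupyter_text>As a final sanity check, we can reload the finetuned checkpoint and produce forecasts for a few test windows. This is a minimal sketch added for illustration (it is not part of the original training recipe): it reuses the `save_dir` and `test_dataset` objects defined above, and the `prediction_outputs` attribute name is assumed from the `PatchTSMixerForPrediction` output class.<jupyter_code>import torch

# reload the finetuned model saved in the previous cell
inference_model = PatchTSMixerForPrediction.from_pretrained(save_dir)
inference_model.eval()

# build a small batch of past windows from the ETTh2 test split
past_values = torch.stack([test_dataset[i]["past_values"] for i in range(4)])

with torch.no_grad():
    outputs = inference_model(past_values=past_values)

# expected shape: (batch_size, forecast_horizon, num_input_channels)
print(outputs.prediction_outputs.shape)<jupyter_output><empty_output>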
notebooks/examples/patch_tsmixer.ipynb/0
{ "file_path": "notebooks/examples/patch_tsmixer.ipynb", "repo_id": "notebooks", "token_count": 7682 }
147
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets transformers<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an installation of the latest version of those libraries. To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/text-classification). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("text_classification_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a text classification task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) models on a text classification task of the [GLUE Benchmark](https://gluebenchmark.com/). The GLUE Benchmark is a group of nine classification tasks on sentences or pairs of sentences which are:- [CoLA](https://nyu-mll.github.io/CoLA/) (Corpus of Linguistic Acceptability) Determine if a sentence is grammatically correct or not.- [MNLI](https://arxiv.org/abs/1704.05426) (Multi-Genre Natural Language Inference) Determine if a sentence entails, contradicts or is unrelated to a given hypothesis. (This dataset has two versions, one with the validation and test set coming from the same distribution, another called mismatched where the validation and test use out-of-domain data.)- [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) (Microsoft Research Paraphrase Corpus) Determine if two sentences are paraphrases from one another or not.- [QNLI](https://rajpurkar.github.io/SQuAD-explorer/) (Question-answering Natural Language Inference) Determine if the answer to a question is in the second sentence or not. 
(This dataset is built from the SQuAD dataset.)- [QQP](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs) (Quora Question Pairs2) Determine if two questions are semantically equivalent or not.- [RTE](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment) (Recognizing Textual Entailment) Determine if a sentence entails a given hypothesis or not.- [SST-2](https://nlp.stanford.edu/sentiment/index.html) (Stanford Sentiment Treebank) Determine if the sentence has a positive or negative sentiment.- [STS-B](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) (Semantic Textual Similarity Benchmark) Determine the similarity of two sentences with a score from 1 to 5.- [WNLI](https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html) (Winograd Natural Language Inference) Determine if a sentence with an anonymous pronoun and a sentence with this pronoun replaced are entailed or not. (This dataset is built from the Winograd Schema Challenge dataset.)We will see how to easily load the dataset for each one of those tasks and use the `Trainer` API to fine-tune a model on it. Each task is named by its acronym, with `mnli-mm` standing for the mismatched version of MNLI (so same training set as `mnli` but different validation and test sets):<jupyter_code>GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]<jupyter_output><empty_output><jupyter_text>This notebook is built to run on any of the tasks in the list above, with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a classification head. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those three parameters, then the rest of the notebook should run smoothly:<jupyter_code>task = "cola" model_checkpoint = "distilbert-base-uncased" batch_size = 16<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset, load_metric<jupyter_output><empty_output><jupyter_text>Apart from `mnli-mm` being a special code, we can directly pass our task name to those functions. 
`load_dataset` will cache the dataset to avoid downloading it again the next time you run this cell.<jupyter_code>actual_task = "mnli" if task == "mnli-mm" else task dataset = load_dataset("glue", actual_task) metric = load_metric('glue', actual_task)<jupyter_output>Reusing dataset glue (/home/sgugger/.cache/huggingface/datasets/glue/cola/1.0.0/7c99657241149a24692c402a5c3f34d4c9f1df5ac2e4c3759fadea38f6cb29c4)<jupyter_text>The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set (with more keys for the mismatched validation and test set in the special case of `mnli`).<jupyter_code>dataset<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>dataset["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, datasets.ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(dataset["train"])<jupyter_output><empty_output><jupyter_text>The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Metric):<jupyter_code>metric<jupyter_output><empty_output><jupyter_text>You can call its `compute` method with your predictions and labels directly and it will return a dictionary with the metric(s) value:<jupyter_code>import numpy as np fake_preds = np.random.randint(0, 2, size=(64,)) fake_labels = np.random.randint(0, 2, size=(64,)) metric.compute(predictions=fake_preds, references=fake_labels)<jupyter_output><empty_output><jupyter_text>Note that `load_metric` has loaded the proper metric associated to your task, which is:- for CoLA: [Matthews Correlation Coefficient](https://en.wikipedia.org/wiki/Matthews_correlation_coefficient)- for MNLI (matched or mismatched): Accuracy- for MRPC: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for QNLI: Accuracy- for QQP: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for RTE: Accuracy- for SST-2: Accuracy- for STS-B: [Pearson Correlation Coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) and [Spearman's_Rank_Correlation_Coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)- for WNLI: Accuracyso the metric object only computes the one(s) needed for your task. Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. 
This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)<jupyter_output><empty_output><jupyter_text>We pass along `use_fast=True` to the call above to use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, but if you got an error with the previous call, remove that argument. You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this one sentence!", "And this sentence goes with it.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.To preprocess our dataset, we will thus need the names of the columns containing the sentence(s). The following dictionary keeps track of the correspondence task to column names:<jupyter_code>task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mnli-mm": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), }<jupyter_output><empty_output><jupyter_text>We can double check it does work on our current dataset:<jupyter_code>sentence1_key, sentence2_key = task_to_keys[task] if sentence2_key is None: print(f"Sentence: {dataset['train'][0][sentence1_key]}") else: print(f"Sentence 1: {dataset['train'][0][sentence1_key]}") print(f"Sentence 2: {dataset['train'][0][sentence2_key]}")<jupyter_output>Sentence: Our friends won't buy this analysis, let alone the next one we propose.<jupyter_text>We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer that what the model selected can handle will be truncated to the maximum length accepted by the model.<jupyter_code>def preprocess_function(examples): if sentence2_key is None: return tokenizer(examples[sentence1_key], truncation=True) return tokenizer(examples[sentence1_key], examples[sentence2_key], truncation=True)<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. 
In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>preprocess_function(dataset['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>encoded_dataset = dataset.map(preprocess_function, batched=True)<jupyter_output><empty_output><jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about sentence classification, we use the `AutoModelForSequenceClassification` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which is always 2, except for STS-B which is a regression problem and MNLI where we have 3 labels):<jupyter_code>from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer num_labels = 3 if task.startswith("mnli") else 1 if task=="stsb" else 2 model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias'] - This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model). - This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['pre_classifier.weight', 'pre_classifier.bias', 'classi[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). 
This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `Trainer`, we will need to define two more things. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>metric_name = "pearson" if task == "stsb" else "matthews_correlation" if task == "cola" else "accuracy" model_name = model_checkpoint.split("/")[-1] args = TrainingArguments( f"{model_name}-finetuned-{task}", evaluation_strategy = "epoch", save_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, metric_for_best_model=metric_name, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay. Since the best model might not be the one at the end of training, we ask the `Trainer` to load the best model it saved (according to `metric_name`) at the end of training. The last argument sets everything up so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally under a name that is different from the name of the repository it will be pushed to, or if you want to push your model under an organization and not your namespace, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/bert-finetuned-mrpc"` or `"huggingface/bert-finetuned-mrpc"`). The last thing to define for our `Trainer` is how to compute the metrics from the predictions. We need to define a function for this, which will just use the `metric` we loaded earlier; the only preprocessing we have to do is to take the argmax of our predicted logits (or just squeeze the last axis in the case of STS-B):<jupyter_code>def compute_metrics(eval_pred): predictions, labels = eval_pred if task != "stsb": predictions = np.argmax(predictions, axis=1) else: predictions = predictions[:, 0] return metric.compute(predictions=predictions, references=labels)<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>validation_key = "validation_mismatched" if task == "mnli-mm" else "validation_matched" if task == "mnli" else "validation" trainer = Trainer( model, args, train_dataset=encoded_dataset["train"], eval_dataset=encoded_dataset[validation_key], tokenizer=tokenizer, compute_metrics=compute_metrics )<jupyter_output><empty_output><jupyter_text>You might wonder why we pass along the `tokenizer` when we already preprocessed our data. 
This is because we will use it one last time to make all the samples we gather the same length by applying padding, which requires knowing the model's preferences regarding padding (to the left or right? with which token?). The `tokenizer` has a pad method that will do all of this right for us, and the `Trainer` will use it. You can customize this part by defining and passing your own `data_collator` which will receive the samples like the dictionaries seen above and will need to return a dictionary of tensors. We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>We can check with the `evaluate` method that our `Trainer` did reload the best model properly (if it was not the last one):<jupyter_code>trainer.evaluate()<jupyter_output><empty_output><jupyter_text>To see how your model fared, you can compare it to the [GLUE Benchmark leaderboard](https://gluebenchmark.com/leaderboard). You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output><jupyter_text>You can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```python from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("sgugger/my-awesome-model")``` Hyperparameter search The `Trainer` supports hyperparameter search using [optuna](https://optuna.org/) or [Ray Tune](https://docs.ray.io/en/latest/tune/). For this last section you will need either of those libraries installed, just uncomment the line you want on the next cell and run it.<jupyter_code># ! pip install optuna # ! pip install ray[tune]<jupyter_output><empty_output><jupyter_text>During hyperparameter search, the `Trainer` will run several trainings, so it needs to have the model defined via a function (so it can be reinitialized at each new run) instead of just having it passed. We just use the same function as before:<jupyter_code>def model_init(): return AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)<jupyter_output><empty_output><jupyter_text>And we can instantiate our `Trainer` like before:<jupyter_code>trainer = Trainer( model_init=model_init, args=args, train_dataset=encoded_dataset["train"], eval_dataset=encoded_dataset[validation_key], tokenizer=tokenizer, compute_metrics=compute_metrics )<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias'] - This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model). - This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). 
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['pre_classifier.weight', 'pre_classifier.bias', 'classi[...]<jupyter_text>The method we call this time is `hyperparameter_search`. Note that it can take a long time to run on the full dataset for some of the tasks. You can try to find some good hyperparameter on a portion of the training dataset by replacing the `train_dataset` line above by:```pythontrain_dataset = encoded_dataset["train"].shard(index=1, num_shards=10) ```for 1/10th of the dataset. Then you can run a full training on the best hyperparameters picked by the search.<jupyter_code>best_run = trainer.hyperparameter_search(n_trials=10, direction="maximize")<jupyter_output>[I 2020-10-15 17:06:50,302] A new study created in memory with name: no-name-5ca133fa-884f-4b0b-beb6-82fb19c9da06 Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias'] - This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model). - This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of DistilBertForSequenceClassification were not initialized from the [...]<jupyter_text>The `hyperparameter_search` method returns a `BestRun` objects, which contains the value of the objective maximized (by default the sum of all metrics) and the hyperparameters it used for that run.<jupyter_code>best_run<jupyter_output><empty_output><jupyter_text>You can customize the objective to maximize by passing along a `compute_objective` function to the `hyperparameter_search` method, and you can customize the search space by passing a `hp_space` argument to `hyperparameter_search`. See this [forum post](https://discuss.huggingface.co/t/using-hyperparameter-search-in-trainer/785/10) for some examples.To reproduce the best training, just set the hyperparameters in your `TrainingArgument` before creating a `Trainer`:<jupyter_code>for n, v in best_run.hyperparameters.items(): setattr(trainer.args, n, v) trainer.train()<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias'] - This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model). - This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). 
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['pre_classifier.weight', 'pre_classifier.bias', 'classi[...]
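<jupyter_text>As a quick sanity check (a sketch added for illustration, not part of the original notebook), you could load the checkpoint you pushed to the Hub and classify a sentence with the `pipeline` API. The repository id below is a placeholder: replace it with the `"your-username/the-name-you-picked"` identifier created by `push_to_hub`.<jupyter_code>from transformers import pipeline

# hypothetical repo id -- substitute the one printed by trainer.push_to_hub()
classifier = pipeline("text-classification", model="your-username/distilbert-base-uncased-finetuned-cola")
print(classifier("Our friends won't buy this analysis, let alone the next one we propose."))<jupyter_output><empty_output>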
notebooks/examples/text_classification.ipynb/0
{ "file_path": "notebooks/examples/text_classification.ipynb", "repo_id": "notebooks", "token_count": 7676 }
148
<jupyter_start><jupyter_text>Explain *Anything* Like I'm Five: A Model for Open Domain Long Form Question Answering--- Table of Contents 1. [**Introduction**](intro) a. [Preliminaries](prelims) b. [Note on Data and Biases](reddit_biases)2. [**Task and Data Description**](task_description) 3. [**Sparse Retrieval:** Making Support Documents with ElasticSearch](elasticsearch) 4. [**Dense Retrieval:** Making Support Documents with an ELI5-Trained Model](dense_retrieval) b. [Contrastive Training with ELI5 In-Batch Negatives](dense_train) c. [Using the Trained Dense Retriever and Wikipedia Index](dense_use) d. [Retrieval Model Evaluation](dense_eval) 5. [**Generating Answers with a Sequence-to-Sequence Model**](generation) --- --- Screenshot of the demo for the question: "How do people make chocolate?" --- 1. Introduction Imagine that you are taken with a sudden desire to understand **how the fruit of a tropical tree gets transformed into chocolate bars**, or want to understand **the role of fever in the human body's immune response**: how would you go about finding that information? If your specific question has already been asked and answered clearly and succinctly on one of the many question answering platforms available on the Internet (such as [**Quora**](https://www.quora.com/How-is-chocolate-made), [**Reddit**](https://www.reddit.com/user/ex_5_libris/comments/9c8gb1/chocolate_how_chocolate_is_made/), or [**Yahoo Answers**](https://answers.yahoo.com/question/index?qid=20070615082202AArsYN1)), you're in luck: modern search engines will probably take you to that pre-existing answer pretty reliably in a matter of a few clicks. If no one else has asked the exact question you are interested in, however, the process will be a little more involved. You will likely have to collect relevant information from a variety of sources, figure out how these pieces of knowledge fit together in relation to your query, and synthesize a narrative that answers your initial question. Now, wouldn't it be great if your computer could do all of that for you: **gather** the right sources (*e.g.* paragraphs from relevant Wikipedia pages), **synthesize** the information, and **write up** an easy-to-read, original summary of the relevant points? Such a system isn't quite available yet, at least not one that can provide *reliable* information in its summary. Even though current systems excel at finding an extractive span that answers a factoid question in a given document, they still find open-domain settings where a model needs to find its own sources of information and long answer generation challenging. Thankfully, a number of recent advances in natural language understanding and generation have made working toward solving this problem much easier! These advances include progress in the pre-training (e.g. [BART](https://arxiv.org/abs/1910.13461), [T5](https://arxiv.org/abs/1910.10683)) and evaluation (e.g. for [factuality](https://arxiv.org/abs/2004.04228)) of sequence-to-sequence models for conditional text generation, new ways to use language understanding models to find information in Wikipedia (e.g. 
[REALM](https://kentonl.com/pub/gltpc.2020.pdf), [DPR](https://arxiv.org/abs/2004.04906)), and a new training dataset introduced in the paper [ELI5: Long Form Question Answering](https://arxiv.org/abs/1907.09190).The [ELI5](https://arxiv.org/abs/1907.09190) dataset was built by gathering questions that were asked by community members of the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) subreddit, along with the answers that were provided by other users. The [rules of the subreddit](https://www.reddit.com/r/explainlikeimfive/wiki/detailed_rules) make this data particularly well suited to training a model for abstractive question answering: the questions need to seek an objective explanation about well established facts, and the answers provided need to be understandable to a layperson without any particular knowledge domain.**In this notebook,** we show how we can take advantage of these recent advances to train a **long form question answering** system which takes in a question, fetches 10 relevant passages from a [Wikipedia snapshot](https://www.aclweb.org/anthology/2020.lrec-1.297/), and writes a multi-sentence answer based on the question and retrieved passages. In particular, training embedding-based retrieval models to gather supporting evidence for open-domain questions is relatively new research area: the last few months have seen some significant progress in cases where direct supervision is available, or with extensive task-specific pretraining. Here, we show how the ELI5 dataset allows us to **train a dense retrieval system without access to either**, making dense retrieval models more accessible. See [this presentation](https://docs.google.com/presentation/d/1A5wJEzFYGdNem7egJ-BTm6EMI3jGNe1lalyChYL54gw) from the [Hugging Face reading group](https://github.com/huggingface/awesome-papers) for a non-exhaustive overview of recent work in the field. Follow along to learn about the steps involved and read some background on the state of the art for some related tasks, or go straight to the: [**Live Demo!**](https://huggingface.co/qa/) (And don't forget to scroll down on the left sidebar to show all of the generation options!) 1.a - Preliminaries The implementation presented here relies on the [Hugging Face](https://huggingface.co/) [🤗transformers](https://github.com/huggingface/transformers) and [🤗nlp](https://github.com/huggingface/nlp) libraries. Wikipedia indexing relies on [ElasticSearch](https://www.elastic.co/elasticsearch) with its [python bindings](https://github.com/elastic/elasticsearch-py) for the sparse version, and [faiss](https://github.com/facebookresearch/faiss/) for the dense version. You can get all of these by running:> pip install elasticsearch > pip install faiss_gpu > pip install nlp > pip install transformers > > wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.7.1-linux-x86_64.tar.gz > tar -xzvf elasticsearch-7.7.1-linux-x86_64.tar.gz The training relies on two datasets: [ELI5](https://arxiv.org/abs/1907.09190), a processed version of the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) subreddit, and the [Wiki40b](https://www.aclweb.org/anthology/2020.lrec-1.297/) Wikipedia image. 
You can download both using the [🤗nlp](https://github.com/huggingface/nlp) linrary with:<jupyter_code>import nlp eli5 = nlp.load_dataset('eli5') wiki40b_snippets = nlp.load_dataset('wiki_snippets', name='wiki40b_en_100_0')['train']<jupyter_output><empty_output><jupyter_text>Additionally, all of the useful methods used in this notebook are compiled in the [lfqa_utils.py](https://github.com/huggingface/longform-qa/lfqa_utils.py) script:<jupyter_code>from lfqa_utils import *<jupyter_output><empty_output><jupyter_text>1.b - Note on Data and BiasesBefore we go any further, let us take a moment to talk about the provenance of our training data. While Reddit hosts a number of thriving communities with high quality discussions, it is also widely known to have corners where sexism, hate, and harassment are significant issues. See for example the [recent post from Reddit founder u/spez](https://www.reddit.com/r/announcements/comments/gxas21/upcoming_changes_to_our_content_policy_our_board/) outlining some of the ways he thinks the website's historical policies have been responsible for this problem, [Adrienne Massanari's 2015 article on GamerGate](https://www.researchgate.net/publication/283848479_Gamergate_and_The_Fappening_How_Reddit's_algorithm_governance_and_culture_support_toxic_technocultures) and follow-up works, or a [2019 Wired article on misogyny on Reddit](https://www.wired.com/story/misogyny-reddit-research/).While there has been some recent work in the NLP community on *de-biasing* models (e.g. [Black is to Criminal as Caucasian is to Police:Detecting and Removing Multiclass Bias in Word Embeddings](https://arxiv.org/abs/1904.04047) for word embeddings trained specifically on Reddit data), this problem is far from solved, and the likelihood that a trained model might learn the biases present in the data remains a significant concern.As mentioned above, the magnitude of the problem depends on the specific communities/subreddits. This work uses data from [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), and the `nlp` library also gives access to examples from [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/). There are some encouraging signs for all of these communities: [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) and [r/askscience](https://www.reddit.com/r/askscience/) have similar structures and purposes, and [r/askscience](https://www.reddit.com/r/askscience/) was found in 2015 to show medium supportiveness and very low toxicity when compared to other subreddits (see a [hackerfall post](https://hackerfall.com/story/study-and-interactive-visualization-of-toxicity-in), [thecut.com write-up](https://www.thecut.com/2015/03/interactive-chart-of-reddits-toxicity.html) and supporting [data](https://chart-studio.plotly.com/~bsbell21/210/toxicity-vs-supportiveness-by-subreddit/data)). Meanwhile, the [r/AskHistorians rules](https://www.reddit.com/r/AskHistorians/wiki/rules) mention that the admins will not tolerate "*racism, sexism, or any other forms of bigotry*".This is obviously not enough to exonerate the model (the pre-training step, for example, raises its own questions on that topic), and there is still a lot of interesting work to do to be able to quantify the biases in a conditional text generation model. 
One thing you can do to help: if you find any particularly egregious answers provided by the model when using the demo, or want to work on this research question please send a DM to [@YJernite on Twitter](https://twitter.com/YJernite)! 2. Task and Data DescriptionLet's recap: we are interested in the task of Long Form Question Answering. As in other Question Answering tasks, the model is presented with a question, and is required to generate a natural language answer. Whereas a majority of QA datasets contain mostly **factoid** questions, where the answer, such as a date or the name of a single entity, can be expressed in a few words or single sentence, Long Form QA focuses on questions which call for an **explanation** consisting of a few sentences or a few paragraphs.In order to teach a model to answer such questions, we use questions and answers written by Reddit users. Note that the `nlp.load_dataset` command above actually downloaded questions and their associated answers from the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits. We focus here on the **ELI5/explainlikeimfive** part to train the system, as these examples tend to be a little simpler. Let's look at one item from the test set:<jupyter_code>eli5['test_eli5'][12345]<jupyter_output><empty_output><jupyter_text>So here we have the question:> Why does water heated to room temperature feel colder than the air around it? This definitely requires a multi-step explanation: no single phrase can sum up all of the information we are looking for. Here are the answers that were given on ELI5, and were given scores of +5 and +2 respectively by Reddit users:> 1. Water transfers heat more efficiently than air. When something feels cold it's because heat is being transferred from your skin to whatever you're touching. Since water absorbs the heat more readily than air, it feels colder. > 2. Air isn't as good at transferring heat compared to something like water or steel (sit on a room temperature steel bench vs. a room temperature wooden bench, and the steel one will feel more cold). When you feel cold, what you're feeling is heat being transferred out of you. If there is no breeze, you feel a certain way. If there's a breeze, you will get colder faster (because the moving air is pulling the heat away from you), and if you get into water, its quite good at pulling heat from you. Get out of the water and have a breeze blow on you while you're wet, all of the water starts evaporating, pulling even more heat from you. First, note that in this case **we have two answers** which broadly describe the same phenomenon: the first one is scored higher because it is more succint and to the point. This example already illustrates one important feature of the LFQA task: **there are usually several valid ways to answer a given question.** Of the 272K examples in the ELI5 training set, nearly two thirds (167K) have at least two answers. We'll need to keep this in mind when training and evaluation of the model. Secondly, we need to give our model access to the information that is expressed in both these answers. Recently released models have been shown to include a significant amount of world knowledge in their parameters without the need of any external knowledge at all (see e.g. the [Closed-book QA performance of the T5 model](https://arxiv.org/abs/2002.08910)). 
There are several advantages to giving the model explicit access to information in text form however. First, a larger number of parameters in a model implies a larger computational cost. Secondly, getting information from a text database allows us to easily update the model's knowledge without having to re-train its parameters.--- --- Overview of the full question answering process. First, the Document Retriever selects a set of passages from Wikipedia that have information relevant to the question. Then, the Answer Generation Model reads the concatenation of the question and retrieverd passages, and writes out the answer.--- Here, we choose to give the model access to Wikipedia text. Full Wikipedia articles are typically too long for most current models to handle, and notable exceptions like the [Reformer](https://arxiv.org/abs/2001.04451) or [Longformer](https://arxiv.org/abs/2004.05150) architectures unfortunately do not yet have pre-trained sequence-to-sequence variants. Thus, we follow previous work in splitting Wikipedia articles into disjoint snippets of 100 words, and keep track of the title of the article and sections a snippet came from. Here's how you can get a pre-processed Wiki40b version split into 100-word passages with the `nlp` library, and an example snippet which has some of the information we're looking for ("*little conduction would occur since air is a poor conductor of heat*"):<jupyter_code>wiki40b_snippets[8991855]<jupyter_output><empty_output><jupyter_text>In the next two sections, we show how we can use either a [sparse retriever](elasticsearch) or a [trained dense retriever](dense_retrieval) to automatically find relevant snippets for a question. 3. Sparse Retrieval: Making Support Documents with ElasticSearchIn this section, we show how to use either such a "classical" Information Retrieval (IR) system based on **sparse** word matching with [ElasticSearch](https://www.elastic.co/elasticsearch/), an extremely popular and efficient search engine that can be used for finding documents that match a given query based on word overlap. Specifically, ElasticSearch provides a convenient way to index documents so they can easily be queried for nearest neighbor search using the [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) similarity function (which relies on TF-IDF weighting of words). While this word-matching based approach has obvious limitations, such as failing to take synonyms and sometimes grammatical variation into account, it does pretty well overall and has only recently been overtaken by embedding-based systems for Wikipedia-based Open-Domain QA tasks.In order to use ElasticSearch, you will first need to launch a server. In a different window, run:> ./elasticsearch-7.7.0/bin/elasticsearchBy default, your ElasticSearch server will be listening on `localhost` port `9200`. To connect to it run:<jupyter_code>es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])<jupyter_output><empty_output><jupyter_text>The `eli5_utils.py` script provides utilities to create (`make_es_index_snippets`) and query (`query_es_index`) an ElasticSearch index from within Python.The main implementation details are: 1. We index the article title, section title, and text of each of the passages for BM25 passages. 
These choices are implemented in the `index_config` variable: ```python index_config = { "settings": { "number_of_shards": 1, }, "mappings": { "properties": { "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"} } } }```2. To query the index, we found it useful to add a few task-dependent stop-words. The query text is then compared to all of the indexed fields, giving more weight to the passage text:```python banned = ['how', 'why', 'what', 'where', 'which', 'do', 'does', 'is', '?', 'eli5', 'eli5:'] q = ' '.join([w for w in q.split() if w not in banned]) response = es_client.search( index = index_name, body = { "query": { "multi_match": { "query": q, "fields": ["article_title", "section_title", "passage_text^2"], "type": "cross_fields", } }, "size": n_results, } )``` Here's the command to create the index, it should take one to three hours depending on your system.<jupyter_code>if not es_client.indices.exists('wiki40b_snippets_100w'): make_es_index_snippets(es_client, wiki40b_snippets, index_name='wiki40b_snippets_100w')<jupyter_output><empty_output><jupyter_text>Now let's test the ElasticSearch retriever with our running example ELI5 question about skin-to-water heat transfer by returning the 10 best candidate passages:<jupyter_code>question = eli5['test_eli5'][12345]['title'] doc, res_list = query_es_index(question, es_client, index_name='wiki40b_snippets_100w') df = pd.DataFrame({ 'Article': ['---'] + [res['article_title'] for res in res_list], 'Sections': ['---'] + [res['section_title'] if res['section_title'].strip() != '' else res['article_title'] for res in res_list], 'Text': ['--- ' + question] + [res['passage_text'] for res in res_list], }) df.style.set_properties(**{'text-align': 'left'})<jupyter_output><empty_output><jupyter_text>We can immediately see both the strengths and limitations of this approach. The system manages to retrieve documents that are all broadly on topic, mentioning some combination of *water*, *air*, *relative temperature*, and *temperature transfer*. In spite of this, only example 8 ends up containing information that is actually relevant to the question:> Cold air with high relative humidity "feels" colder than dry air of the same temperature because high humidity in cold weather increases the conduction of heat from the body. We got lucky this time, but this passage could as easily have been ranked 11th and not been included in the support document we provide to the answer generation system. As it is, the model will have to sort through mostly off-topic information to find this sentence when reading the resulting supporting document. 4. Retrieving Support Documents with an ELI5-Trained Dense ModelThe sparse retriever works by finding passages which feature the words from the query. However, it has no way to know *a priori* which of these words are more important in context, and seems to struggle with understanding the central theme of the query (human-perceived temperature).Thankfully, some recent works have taken advantage of advances in pre-trained contextual word representations to solve this problem. 
Models such as [DPR](https://arxiv.org/abs/2004.04906) or [REALM](https://arxiv.org/abs/2002.08909) for example learn to compute a vector representation of the query, as well as vector representations of Wikipedia passages in such a way that the passages that best answer a question maximize the dot product between the two representations. Retrieval is then reduced to a Maximum Inner Product Search, which can be executed efficiently using systems like [FAISS](https://github.com/facebookresearch/faiss). These successes are very encouraging for our Open-Domain Long Form QA application. However, our task and setup do not quite meet the requirements of either of these approaches. On the one hand, the [DPR](https://arxiv.org/abs/2004.04906) system is trained using gold passage annotations: most major QA datasets tell the system which Wikipedia passage contains the answer. Unfortunately, we do not have such annotations for the ELI5 data. On the other hand, while [REALM](https://arxiv.org/abs/2002.08909) is trained without passage supervision, it requires a pretty expensive pre-training step with an [Inverse Cloze Task](https://arxiv.org/abs/1906.00300) (100,000 steps with batch size 4096), and the ability to re-compute the embeddings of all Wikipedia passages regularly during training. In order to train a similar dense retrieval system at reduced cost without having access to gold passage annotation, we will have to **take advantage of another unique feature of our dataset**, namely the fact that the long form answers are quite similar in style to the Wikipedia passages we want to index. Our hypothesis then is that if we train a system to embed the questions and answers in our dataset in a way that allows us to easily match questions to answers, then using the answer embedder on Wikipedia passages should allow us to similarly match questions to supporting evidence from Wikipedia. 4.a - Contrastive Training with ELI5 In-Batch Negatives As mentioned above, we want to train a system to produce question and answer embeddings, such that the dot product between the representation of a question and any of its answers is greater than between it and answers of all of the other questions in the dataset. Unfortunately, actually comparing all questions to all answers before taking every single gradient step is computationally prohibitive: instead, we follow previous work in simply processing medium to large batches of question-answer pairs, and making sure that the dot product of a question with its answer is larger than with all other answers in the batch, and *vice versa*. We use a cross-entropy loss for the multinomial distribution over all of the answers (or questions) in a batch, and make use of [PyTorch gradient checkpointing](https://pytorch.org/docs/stable/checkpoint.html) to be able to use large batches with limited GPU memory: you can find all implementation details in the `RetrievalQAEmbedder` class in `eli5_utils.py`.--- --- To train the retriever, we show the model batches of 512 question-answer pairs. The model needs to ensure that the embedding of each question in the batch is closer to the embedding of its corresponding answer than to the embedding of any other answer in the batch.--- 
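To make the training objective concrete, here is a minimal, self-contained sketch of the in-batch negatives loss described above (an illustration only, not the exact `RetrievalQAEmbedder` implementation from `eli5_utils.py`, which additionally handles the projection layers and gradient checkpointing):```python
import torch
import torch.nn.functional as F

def in_batch_negatives_loss(q_reps, a_reps):
    # q_reps, a_reps: (batch_size, dim) question / answer embeddings
    scores = torch.matmul(q_reps, a_reps.t())            # (batch, batch) dot products
    targets = torch.arange(scores.size(0), device=scores.device)
    loss_q_to_a = F.cross_entropy(scores, targets)       # each question must pick its own answer
    loss_a_to_q = F.cross_entropy(scores.t(), targets)   # and each answer its own question
    return (loss_q_to_a + loss_a_to_q) / 2

# toy usage with random 128-dimensional embeddings
print(in_batch_negatives_loss(torch.randn(8, 128), torch.randn(8, 128)))
```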
We found that the 8-layer distilled version of BERT from the [Well-Read Students Learn Better paper](https://arxiv.org/abs/1908.08962) performed as well or better as full BERT for a notable gain in computation speed: if you want an even faster model, that work provides pre-trained models spanning the full range of computation/accuracy trade-offs.The model can than be trained with the following code: with batch size 32/512 on a single 16GB GPU, you can run 10 training epochs in under 6 hours.<jupyter_code># training arguments class ArgumentsQAR(): def __init__(self): self.batch_size = 512 self.max_length = 128 self.checkpoint_batch_size = 32 self.print_freq = 100 self.pretrained_model_name = "google/bert_uncased_L-8_H-768_A-12" self.model_save_name = "retriever_models/eli5_retriever_model_l-8_h-768_b-512-512" self.learning_rate = 2e-4 self.num_epochs = 10 qar_args = ArgumentsQAR() # prepare torch Dataset objects qar_train_dset = ELI5DatasetQARetriver(eli5['train_eli5'], training=True) qar_valid_dset = ELI5DatasetQARetriver(eli5['validation_eli5'], training=False) # load pre-trained BERT and make model qar_tokenizer, qar_model = make_qa_retriever_model( model_name=qar_args.pretrained_model_name, from_file=None, device="cuda:0" ) # train the model train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args)<jupyter_output><empty_output><jupyter_text>If you don't want to train the model yourself, you can also download trained weights from the [Hugging Face model repository](https://huggingface.co/models) with:<jupyter_code>qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased') qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:1') _ = qar_model.eval()<jupyter_output><empty_output><jupyter_text>Once the model is trained, it can be used to compute passage embeddings for all Wikipedia snippets. The `make_qa_dense_index` method takes advantage of `numpy` memory-mapping, so embeddings are written directly to disk. Again with a single GPU, computing the full set of passage embeddings should take about 18 hours.<jupyter_code>if not os.path.isfile('wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat'): make_qa_dense_index( qar_model, qar_tokenizer, wiki40b_snippets, device='cuda:0', index_name='wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' )<jupyter_output><empty_output><jupyter_text>4.b - Using the Trained Dense Retriever and Wikipedia IndexNow that we have trained our model to compute query and answer embeddings and used it to compute passage embeddings for all our Wikipedia snippets, let's see whether it can actually find supporting evidence for a new question. Recall the the two steps to using the dense retriever: we first compute an embedding for a new question, then do Max Inner Product Search with the pre-computed passage representations.--- --- At test time, the Retriever Model encodes the question, and compares its embedding to the pre-computed representation of all the Wikipedia passages. The ten passages with the closest embedding are returned to create the support document.--- The MIPS part can be executed efficiently with the `faiss` library. Additionally, since we computed 128-dimensional passage embeddings, the whole of the representations fits on a GPU, making retrieval even faster. 
We can create the `faiss_gpu` index with the following code:<jupyter_code>faiss_res = faiss.StandardGpuResources() wiki40b_passage_reps = np.memmap( 'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_snippets.num_rows, 128) ) wiki40b_index_flat = faiss.IndexFlatIP(128) wiki40b_gpu_index = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat) wiki40b_gpu_index.add(wiki40b_passage_reps)<jupyter_output><empty_output><jupyter_text>Now we can use the `query_qa_dense_index` function to query the dense index for our running example question about perceived temperature:<jupyter_code>question = eli5['test_eli5'][12345]['title'] doc, res_list = query_qa_dense_index(question, qar_model, qar_tokenizer, wiki40b_snippets, wiki40b_gpu_index, device='cuda:1') df = pd.DataFrame({ 'Article': ['---'] + [res['article_title'] for res in res_list], 'Sections': ['---'] + [res['section_title'] if res['section_title'].strip() != '' else res['article_title'] for res in res_list], 'Text': ['--- ' + question] + [res['passage_text'] for res in res_list], }) df.style.set_properties(**{'text-align': 'left'})<jupyter_output><empty_output><jupyter_text>The retrieved documents are quite different from the ones returned by the sparse retrieval, with a greater focus on how water helps draw heat from a body, either through evaporation or through better conduction, which is information the model needs to answer this question.The retriever still misses out on one aspect of the query: the way the question is formulated implies that in the considered scenario the person is immersed in water rather than just wet, which makes the "latent heat" and evaporation arguments a little less relevant, but that's a really subtle distinction! 4.c - Retriever Model EvaluationWe have trained a retrieval model that *seems* to be working a little better than the traditional word-matching based approach, at least on our running example. Before we use it to actually answer questions, however, we would like to be able to get some **quantitative evaluation** of the performances of both approaches.For the retriever, we want to favor **recall** over precision: our first priority is to make sure that all of the information needed to write the answers is present in the support document. If there is unrelated information, the generation model can learn to sort it out. We measure this by computing the proportion of words in the high-scoring answers which are present in the retrieved support document. To focus on important words, we also weigh answer words by their *Inverse Document Frequency*. This gives us the following **IDF-recall** scoring function:<jupyter_code># We first select high-scoring answers (answers beyond the first must have a score of at least 3) test_qa_list = [(exple['title'], ' '.join([a for i, (a, sc) in enumerate(zip(exple['answers']['text'], exple['answers']['score'])) \ if i == 0 or sc >= 3 ])) for exple in eli5['test_eli5']] # We then compute word frequencies in answer text answer_doc_freq = {} for q, a in test_qa_list: for w in a.lower().split(): answer_doc_freq[w] = answer_doc_freq.get(w, 0) + 1 # The IDF-recall function is then: def da_idf_recall(doc, answer): d_words = dict([(w, True) for w in doc.lower().split()]) a_words = answer.lower().split() recall = sum([1. / math.log(1 + answer_doc_freq.get(w, 1)) for w in a_words if w in d_words]) / \ sum([1. 
/ math.log(1 + answer_doc_freq.get(w, 1)) for w in a_words]) return recall<jupyter_output><empty_output><jupyter_text>The `evaluate_retriever` function in `eli5_utils.py` takes a retrieval and scoring function and computes both the average retrieval time and score of the document relative to the provided answer. Let's write some short-hand functions for the dense and sparse retrievers with our currently loaded indexes, and evaluate them on the ELI5 test set (be advised that evaluating the retriever on the full test set takes up to two hours):<jupyter_code>def dense_ret_for_eval(question, n_ret): _, dense_res_list = query_qa_dense_index( question, qar_model, qar_tokenizer, wiki40b_snippets, wiki40b_gpu_index, n_results=n_ret, device='cuda:1' ) dense_doc = ' '.join([res['passage_text'] for res in dense_res_list]) return dense_doc def sparse_ret_for_eval(question, n_ret): _, sparse_res_list = query_es_index( question, es_client, index_name='wiki40b_snippets_100w', n_results=n_ret ) sparse_doc = ' '.join([res['passage_text'] for res in sparse_res_list]) return sparse_doc dense_score = evaluate_retriever(test_qa_list, dense_ret_for_eval, da_idf_recall) sparse_score = evaluate_retriever(test_qa_list, sparse_ret_for_eval, da_idf_recall) df = pd.DataFrame({ 'IDF-Recall': [sparse_score['idf_recall'], dense_score['idf_recall']], 'Time/Query': [sparse_score['retrieval_time'], dense_score['retrieval_time']], }, index=[ 'Sparse', 'Dense']) df.style.format({'IDF-Recall': "{:.4f}", 'Time/Query': "{:.4f}"})<jupyter_output><empty_output><jupyter_text>This metric obviously has limitations. Since it only looks at individual word matches, it is oblivious to *word order* or *paraphrases* among others. However, we can be encouraged by the fact that the dense retriever not only yields **higher IDF-recall**, it also takes **less than a third of the time** of the ElasticSearch-based system! Considering these results, we can confidently use it for the next part: training the sequence-to-sequence answer generation system. 5. Generating Answers with a Sequence-to-Sequence Model Now that we know how to create an evidence document with supporting information for a given question, let's look into training the second component of our system: the **answer generation module**. We will instantiate it as a sequence-to-sequence model which uses the [BART](https://arxiv.org/abs/1910.13461) architecture, and initialize it with the [bart-large pretrained weights](https://huggingface.co/facebook/bart-large). In short, the [BART paper](https://arxiv.org/abs/1910.13461) uses a denoising auto-encoder style objective to pre-train an encoder-decoder model (similarly to how masked language modeling is used to pre-train BERT-style encoders). Among other applications, they show that large-scale pre-training with their objective followed by fine-tuning on ELI5 data yields the state-of-the-art ROUGE performance for the original version of the dataset (which uses pre-computed support documents made from CommonCrawl pages). We provide the concatenation of the question and support document as input to the model, and train the decoder to minimize the perplexity of the gold answer. One notable choice is that **we train the model using all high-scoring answers in the training set**, so the model will see several instances of the same question-document input with different outputs. 
The supporting passages are separated by a special token ``, so the input for our running example will look like:> question: Why does water heated to room temperature feel colder than the air around it? context: \\ when the skin is completely wet. The body continuously loses ... this heat comes from the liquid itself and the surrounding gas and surfaces. \\ protected by a glass panel. Consequently, these types of collectors... Since heat loss due to convection cannot cross a vacuum, it forms an efficient isolation mechanism to keep heat inside the collector pipes. Since two flat \\ ... \\ changes. Conduction On... Fluids—especially gases—are less conductive. Thermal contact conductance is the study of heat conduction between solid bodies in contact. The process of heat transfer The first thing we do is pre-compute the support documents for the training and validation sets so we can use all available GPUs to train the sequence-to-sequence model. The model is then trained with the `train_qa_s2s` function in `eli5_utils.py`. A 16GB GPU accommodates up to two examples at a time, so here is the code to train the model using 4 GPUs with `torch.nn.DataParallel`. One epoch should take about 18 hours:<jupyter_code># pre-computing support documents eli5_train_docs = [] for example in eli5['train_eli5']: support_doc, dense_res_list = query_qa_dense_index( example['title'], qar_model, qar_tokenizer, wiki40b_snippets, wiki40b_gpu_index, n_results=n_ret ) eli5_train_docs += [(example['q_id'], support_doc, dense_res_list)] eli5_valid_docs = [] for example in eli5['validation_eli5']: support_doc, dense_res_list = query_qa_dense_index( example['title'], qar_model, qar_tokenizer, wiki40b_snippets, wiki40b_gpu_index, n_results=n_ret ) eli5_valid_docs += [(example['q_id'], support_doc, dense_res_list)] # training loop proper class ArgumentsS2S(): def __init__(self): self.batch_size = 8 self.backward_freq = 16 self.max_length = 1024 self.print_freq = 100 self.model_save_name = "seq2seq_models/eli5_bart_model" self.learning_rate = 2e-4 self.num_epochs = 3 s2s_args = ArgumentsS2S() eli5_train_docs = json.load(open('precomputed/eli5_train_precomputed_dense_docs.json')) eli5_valid_docs = json.load(open('precomputed/eli5_valid_precomputed_dense_docs.json')) s2s_train_dset = ELI5DatasetS2S(eli5['train_eli5'], document_cache=dict([(k, d) for k, d, src_ls in eli5_train_docs])) s2s_valid_dset = ELI5DatasetS2S(eli5['validation_eli5'], document_cache=dict([(k, d) for k, d, src_ls in eli5_valid_docs]), training=False) qa_s2s_tokenizer, pre_model = make_qa_s2s_model( model_name="facebook/bart-large", from_file=None, device="cuda:0" ) qa_s2s_model = torch.nn.DataParallel(pre_model) train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args)<jupyter_output><empty_output><jupyter_text>Again, if you don't want to train the model yourself, we made trained weights available on the [Hugging Face model repository](https://huggingface.co/models), which you can download with:<jupyter_code>qa_s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5') qa_s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0') _ = qa_s2s_model.eval()<jupyter_output><empty_output><jupyter_text>We now have everything we need to answer any question! 
Now let's try the full system on our running example along with the first four questions of the test set:<jupyter_code>questions = [] answers = [] for i in [12345] + [j for j in range(4)]: # create support document with the dense index question = eli5['test_eli5'][i]['title'] doc, res_list = query_qa_dense_index( question, qar_model, qar_tokenizer, wiki40b_snippets, wiki40b_gpu_index, device='cuda:1' ) # concatenate question and support document into BART input question_doc = "question: {} context: {}".format(question, doc) # generate an answer with beam search answer = qa_s2s_generate( question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=8, min_len=64, max_len=256, max_input_length=1024, device="cuda:0" )[0] questions += [question] answers += [answer] df = pd.DataFrame({ 'Question': questions, 'Answer': answers, }) df.style.set_properties(**{'text-align': 'left'})<jupyter_output><empty_output><jupyter_text>We made it, and a lot of these answers actually make sense! The model seems to sometimes struggle with coherence and with starting some of the answers, but we're getting some pretty good information overall.The last thing we'll do is see how we can get a quantitative evaluation of the model performance. Here, we'll use the ROUGE implementation provided in the `nlp` library. Note that it is a different implementation than the one used in the [BART](https://arxiv.org/abs/1910.13461) and [ELI5](https://arxiv.org/abs/1907.09190) papers: the [rouge](https://pypi.org/project/rouge/) Python package they use normalises all numerical values, among other pre-processing choices, leading to higher numbers. We reproduce their evaluation in the Appendix section, but recommend using the more sensitive metric provided by the `nlp` package, which can be computed with:<jupyter_code>predicted = [] reference = [] # Generate answers for the full test set for i in range(eli5['test_eli5'].num_rows): # create support document with the dense index question = eli5['test_eli5'][i]['title'] doc, res_list = query_qa_dense_index( question, qar_model, qar_tokenizer, wiki40b_snippets, wiki40b_gpu_index, device='cuda:1' ) # concatenate question and support document into BART input question_doc = "question: {} context: {}".format(question, doc) # generate an answer with beam search answer = qa_s2s_generate( question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=8, min_len=96, max_len=256, max_input_length=1024, device="cuda:0" )[0] predicted += [answer] reference += [eli5['test_eli5'][i]['answers']['text'][0]] # Compare each generation to the fist answer from the dataset nlp_rouge = nlp.load_metric('rouge') scores = nlp_rouge.compute( predicted, reference, rouge_types=['rouge1', 'rouge2', 'rougeL', 'rougeLsum'], use_agregator=True, use_stemmer=False ) df = pd.DataFrame({ 'rouge1': [scores['rouge1'].mid.precision, scores['rouge1'].mid.recall, scores['rouge1'].mid.fmeasure], 'rouge2': [scores['rouge2'].mid.precision, scores['rouge2'].mid.recall, scores['rouge2'].mid.fmeasure], 'rougeL': [scores['rougeL'].mid.precision, scores['rougeL'].mid.recall, scores['rougeL'].mid.fmeasure], }, index=[ 'P', 'R', 'F']) df.style.format({'rouge1': "{:.4f}", 'rouge2': "{:.4f}", 'rougeL': "{:.4f}"})<jupyter_output><empty_output><jupyter_text>That's it for today! And once again, if you want to play with the model a bit more and ask it whatever question comes to mind, please feel free to head over to: [**Our Live Demo!**](https://huggingface.co/qa/) Thank you for reading! 
Appendix: Here we reproduce the ROUGE evaluation from the original [ELI5 paper](https://arxiv.org/abs/1907.09190) to be able to compare our performance to theirs. Our generation setting leads to lower ROUGE-1 and ROUGE-2 than the state-of-the-art reported in [BART](https://arxiv.org/abs/1910.13461) (30.6 and 6.2 respectively), and higher ROUGE-L (24.3).<jupyter_code>from nltk import PorterStemmer from rouge import Rouge from spacy.lang.en import English from time import time stemmer = PorterStemmer() rouge = Rouge() tokenizer = English().Defaults.create_tokenizer() def compute_rouge_eli5(compare_list): preds = [" ".join([stemmer.stem(str(w)) for w in tokenizer(pred)]) for gold, pred in compare_list] golds = [" ".join([stemmer.stem(str(w)) for w in tokenizer(gold)]) for gold, pred in compare_list] scores = rouge.get_scores(preds, golds, avg=True) return scores compare_list = [(g, p) for p, g in zip(predicted, reference)] scores = compute_rouge_eli5(compare_list) df = pd.DataFrame({ 'rouge1': [scores['rouge-1']['p'], scores['rouge-1']['r'], scores['rouge-1']['f']], 'rouge2': [scores['rouge-2']['p'], scores['rouge-2']['r'], scores['rouge-2']['f']], 'rougeL': [scores['rouge-l']['p'], scores['rouge-l']['r'], scores['rouge-l']['f']], }, index=[ 'P', 'R', 'F']) df.style.format({'rouge1': "{:.4f}", 'rouge2': "{:.4f}", 'rougeL': "{:.4f}"})<jupyter_output><empty_output>
notebooks/longform-qa/Long_Form_Question_Answering_with_ELI5_and_Wikipedia.ipynb/0
{ "file_path": "notebooks/longform-qa/Long_Form_Question_Answering_with_ELI5_and_Wikipedia.ipynb", "repo_id": "notebooks", "token_count": 13060 }
149
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Distributed Training Demo Distributed Summarization with `transformers` scripts + `Trainer` and `samsum` dataset 1. [Tutorial](Tutorial) 2. [Set up a development environment and install sagemaker](Set-up-a-development-environment-and-install-sagemaker) 1. [Installation](Installation) 2. [Development environment](Development-environment) 3. [Permissions](Permissions) 4. [Choose 🤗 Transformers `examples/` script](Choose-%F0%9F%A4%97-Transformers-examples/-script) 1. [Configure distributed training and hyperparameters](Configure-distributed-training-and-hyperparameters) 2. [Create a `HuggingFace` estimator and start training](Create-a-HuggingFace-estimator-and-start-training) 3. [Upload the fine-tuned model to huggingface.co](Upload-the-fine-tuned-model-to-huggingface.co) TutorialWe will use the new [Hugging Face DLCs](https://github.com/aws/deep-learning-containers/tree/master/huggingface) and [Amazon SageMaker extension](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.htmlhuggingface-estimator) to train a distributed Seq2Seq-transformer model on `summarization` using the `transformers` and `datasets` libraries and upload it afterwards to [huggingface.co](http://huggingface.co) and test it.As [distributed training strategy](https://huggingface.co/transformers/sagemaker.htmldistributed-training-data-parallel) we are going to use [SageMaker Data Parallelism](https://aws.amazon.com/blogs/aws/managed-data-parallelism-in-amazon-sagemaker-simplifies-training-on-large-datasets/), which has been built into the [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) API. To use data-parallelism we only have to define the `distribution` parameter in our `HuggingFace` estimator.```python configuration for running training on smdistributed Data Paralleldistribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}}```In this tutorial, we will use an Amazon SageMaker Notebook Instance for running our training job. You can learn [here how to set up a Notebook Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi.html).**What are we going to do:**- Set up a development environment and install sagemaker- Chose 🤗 Transformers `examples/` script- Configure distributed training and hyperparameters- Create a `HuggingFace` estimator and start training- Upload the fine-tuned model to [huggingface.co](http://huggingface.co)- Test inference Model and DatasetWe are going to fine-tune [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the [samsum](https://huggingface.co/datasets/samsum) dataset. *"BART is sequence-to-sequence model trained with denoising as pretraining objective."* [[REF](https://github.com/pytorch/fairseq/blob/master/examples/bart/README.md)]The `samsum` dataset contains about 16k messenger-like conversations with summaries. ```python{'id': '13818513', 'summary': 'Amanda baked cookies and will bring Jerry some tomorrow.', 'dialogue': "Amanda: I baked cookies. 
Do you want some?\r\nJerry: Sure!\r\nAmanda: I'll bring you tomorrow :-)"}```_**NOTE: You can run this demo in Sagemaker Studio, your local machine or Sagemaker Notebook Instances**_ Set up a development environment and install sagemaker Installation _**Note:** The use of Jupyter is optional: We could also launch SageMaker Training jobs from anywhere we have an SDK installed, connectivity to the cloud and appropriate permissions, such as a Laptop, another IDE or a task scheduler like Airflow or AWS Step Functions._<jupyter_code>!pip install "sagemaker>=2.48.0" --upgrade #!apt install git-lfs !curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh | sudo bash !sudo yum install git-lfs -y !git lfs install<jupyter_output><empty_output><jupyter_text>Development environment<jupyter_code>import sagemaker.huggingface<jupyter_output><empty_output><jupyter_text>Permissions _If you are going to use Sagemaker in a local environment, you need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output>Couldn't call 'get_role' to get Role ARN from role name philippschmid to get Role path.<jupyter_text>Choose 🤗 Transformers `examples/` script The [🤗 Transformers repository](https://github.com/huggingface/transformers/tree/master/examples) contains several `examples/` scripts for fine-tuning models on tasks from `language-modeling` to `token-classification`. In our case, we are using the `run_summarization.py` from the `seq2seq/` examples. _**Note**: you can use this tutorial in the same way to train your model on a different examples script._ Since the `HuggingFace` Estimator has git support built-in, we can specify a [training script that is stored in a GitHub repository](https://sagemaker.readthedocs.io/en/stable/overview.htmluse-scripts-stored-in-a-git-repository) as `entry_point` and `source_dir`. We are going to use the `transformers 4.26.0` DLC which means we need to configure `v4.26.0` as the branch to pull the compatible example scripts.<jupyter_code>git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.26.0'} # v4.26.0 is referring to the `transformers_version` you use in the estimator.<jupyter_output><empty_output><jupyter_text>Configure distributed training and hyperparameters Next, we will define our `hyperparameters` and configure our distributed training strategy. 
As hyperparameter, we can define any [Seq2SeqTrainingArguments](https://huggingface.co/transformers/main_classes/trainer.htmlseq2seqtrainingarguments) and the ones defined in [run_summarization.py](https://github.com/huggingface/transformers/tree/master/examples/seq2seqsequence-to-sequence-training-and-evaluation).<jupyter_code># hyperparameters, which are passed into the training job hyperparameters={'per_device_train_batch_size': 4, 'per_device_eval_batch_size': 4, 'model_name_or_path': 'facebook/bart-large-cnn', 'dataset_name': 'samsum', 'do_train': True, 'do_eval': True, 'do_predict': True, 'predict_with_generate': True, 'output_dir': '/opt/ml/model', 'num_train_epochs': 3, 'learning_rate': 5e-5, 'seed': 7, 'fp16': True, } # configuration for running training on smdistributed Data Parallel distribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}}<jupyter_output><empty_output><jupyter_text>Create a `HuggingFace` estimator and start training<jupyter_code>from sagemaker.huggingface import HuggingFace # create the Estimator huggingface_estimator = HuggingFace( entry_point='run_summarization.py', # script source_dir='./examples/pytorch/summarization', # relative path to example git_config=git_config, instance_type='ml.p3dn.24xlarge', instance_count=2, transformers_version='4.26.0', pytorch_version='1.13.1', py_version='py39', role=role, hyperparameters = hyperparameters, distribution = distribution ) # starting the train job huggingface_estimator.fit()<jupyter_output><empty_output><jupyter_text>Deploying the endpointTo deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type.<jupyter_code>predictor = huggingface_estimator.deploy(1,"ml.g4dn.xlarge")<jupyter_output><empty_output><jupyter_text>Then, we use the returned predictor object to call the endpoint.<jupyter_code>conversation = '''Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker? Philipp: Sure you can use the new Hugging Face Deep Learning Container. Jeff: ok. Jeff: and how can I get started? Jeff: where can I find documentation? Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face ''' data= {"inputs":conversation} predictor.predict(data)<jupyter_output><empty_output><jupyter_text>Finally, we delete the endpoint again.<jupyter_code>predictor.delete_endpoint()<jupyter_output><empty_output><jupyter_text>Upload the fine-tuned model to [huggingface.co](http://huggingface.co)We can download our model from Amazon S3 and unzip it using the following snippet.<jupyter_code>import os import tarfile from sagemaker.s3 import S3Downloader local_path = 'my_bart_model' os.makedirs(local_path, exist_ok = True) # download model from S3 S3Downloader.download( s3_uri=huggingface_estimator.model_data, # s3 uri where the trained model is located local_path=local_path, # local path where *.targ.gz is saved sagemaker_session=sess # sagemaker session used for training the model ) # unzip model tar = tarfile.open(f"{local_path}/model.tar.gz", "r:gz") tar.extractall(path=local_path) tar.close() os.remove(f"{local_path}/model.tar.gz")<jupyter_output><empty_output><jupyter_text>Before we are going to upload our model to [huggingface.co](http://huggingface.co) we need to create a `model_card`. The `model_card` describes the model includes hyperparameters, results and which dataset was used for training. 
To create a `model_card` we create a `README.md` in our `local_path`<jupyter_code># read eval and test results with open(f"{local_path}/eval_results.json") as f: eval_results_raw = json.load(f) eval_results={} eval_results["eval_rouge1"] = eval_results_raw["eval_rouge1"] eval_results["eval_rouge2"] = eval_results_raw["eval_rouge2"] eval_results["eval_rougeL"] = eval_results_raw["eval_rougeL"] eval_results["eval_rougeLsum"] = eval_results_raw["eval_rougeLsum"] with open(f"{local_path}/test_results.json") as f: test_results_raw = json.load(f) test_results={} test_results["test_rouge1"] = test_results_raw["test_rouge1"] test_results["test_rouge2"] = test_results_raw["test_rouge2"] test_results["test_rougeL"] = test_results_raw["test_rougeL"] test_results["test_rougeLsum"] = test_results_raw["test_rougeLsum"]<jupyter_output><empty_output><jupyter_text>After we extract all the metrics we want to include we are going to create our `README.md`. Additionally to the automated generation of the results table we add the metrics manually to the `metadata` of our model card under `model-index`<jupyter_code>print(eval_results) print(test_results) import json MODEL_CARD_TEMPLATE = """ --- language: en tags: - sagemaker - bart - summarization license: apache-2.0 datasets: - samsum model-index: - name: {model_name} results: - task: name: Abstractive Text Summarization type: abstractive-text-summarization dataset: name: "SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization" type: samsum metrics: - name: Validation ROGUE-1 type: rogue-1 value: 42.621 - name: Validation ROGUE-2 type: rogue-2 value: 21.9825 - name: Validation ROGUE-L type: rogue-l value: 33.034 - name: Test ROGUE-1 type: rogue-1 value: 41.3174 - name: Test ROGUE-2 type: rogue-2 value: 20.8716 - name: Test ROGUE-L type: rogue-l value: 32.1337 widget: - text: | Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker? Philipp: Sure you can use the new Hugging Face Deep Learning Container. Jeff: ok. Jeff: and how can I get started? Jeff: where can I find documentation? Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face --- ## `{model_name}` This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning container. For more information look at: - [🤗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html) - [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker) - [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html) - [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html) - [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) ## Hyperparameters {hyperparameters} ## Usage from transformers import pipeline summarizer = pipeline("summarization", model="philschmid/{model_name}") conversation = '''Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker? Philipp: Sure you can use the new Hugging Face Deep Learning Container. Jeff: ok. Jeff: and how can I get started? Jeff: where can I find documentation? Philipp: ok, ok you can find everything here. 
https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face ''' summarizer(conversation) ## Results | key | value | | --- | ----- | {eval_table} {test_table} """ # Generate model card (todo: add more data from Trainer) model_card = MODEL_CARD_TEMPLATE.format( model_name=f"{hyperparameters['model_name_or_path'].split('/')[1]}-{hyperparameters['dataset_name']}", hyperparameters=json.dumps(hyperparameters, indent=4, sort_keys=True), eval_table="\n".join(f"| {k} | {v} |" for k, v in eval_results.items()), test_table="\n".join(f"| {k} | {v} |" for k, v in test_results.items()), ) with open(f"{local_path}/README.md", "w") as f: f.write(model_card)<jupyter_output><empty_output><jupyter_text>After we have our unzipped model and model card located in `my_bart_model` we can either use the `huggingface_hub` SDK to create a repository and upload it to [huggingface.co](http://huggingface.co) or go to https://huggingface.co/new and create a new repository and upload it.<jupyter_code>from getpass import getpass from huggingface_hub import HfApi, Repository hf_username = "philschmid" # your username on huggingface.co hf_email = "[email protected]" # email used for commit repository_name = f"{hyperparameters['model_name_or_path'].split('/')[1]}-{hyperparameters['dataset_name']}" # repository name on huggingface.co password = getpass("Enter your password:") # creates a prompt for entering password # get hf token token = HfApi().login(username=hf_username, password=password) # create repository repo_url = HfApi().create_repo(token=token, name=repository_name, exist_ok=True) # create a Repository instance model_repo = Repository(use_auth_token=token, clone_from=repo_url, local_dir=local_path, git_user=hf_username, git_email=hf_email) # push model to the hub model_repo.push_to_hub() print(f"https://huggingface.co/{hf_username}/{repository_name}")<jupyter_output><empty_output>
notebooks/sagemaker/08_distributed_summarization_bart_t5/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/08_distributed_summarization_bart_t5/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 5673 }
150
import argparse import logging import os import random import sys import numpy as np import torch from datasets import load_from_disk, load_metric from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments from transformers.trainer_utils import get_last_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() # hyperparameters sent by the client are passed as command-line arguments to the script. parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--train_batch_size", type=int, default=32) parser.add_argument("--eval_batch_size", type=int, default=64) parser.add_argument("--warmup_steps", type=int, default=500) parser.add_argument("--model_id", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--fp16", type=bool, default=True) # Push to Hub Parameters parser.add_argument("--push_to_hub", type=bool, default=True) parser.add_argument("--hub_model_id", type=str, default=None) parser.add_argument("--hub_strategy", type=str, default=None) parser.add_argument("--hub_token", type=str, default=None) # Data, model, and output directories parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) parser.add_argument("--output_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"]) parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"]) args, _ = parser.parse_known_args() # make sure we have required parameters to push if args.push_to_hub: if args.hub_strategy is None: raise ValueError("--hub_strategy is required when pushing to Hub") if args.hub_token is None: raise ValueError("--hub_token is required when pushing to Hub") # sets hub id if not provided if args.hub_model_id is None: args.hub_model_id = args.model_id.replace("/", "--") # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # load datasets train_dataset = load_from_disk(args.training_dir) test_dataset = load_from_disk(args.test_dir) logger.info(f" loaded train_dataset length is: {len(train_dataset)}") logger.info(f" loaded test_dataset length is: {len(test_dataset)}") # define metrics and metrics function metric = load_metric("accuracy") def compute_metrics(eval_pred): predictions, labels = eval_pred predictions = np.argmax(predictions, axis=1) return metric.compute(predictions=predictions, references=labels) # Prepare model labels - useful in inference API labels = train_dataset.features["labels"].names num_labels = len(labels) label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label # download model from model hub model = AutoModelForSequenceClassification.from_pretrained( args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label ) tokenizer = AutoTokenizer.from_pretrained(args.model_id) # define training args training_args = TrainingArguments( output_dir=args.output_dir, overwrite_output_dir=True if get_last_checkpoint(args.output_dir) is not None else False, num_train_epochs=args.epochs, per_device_train_batch_size=args.train_batch_size, per_device_eval_batch_size=args.eval_batch_size, warmup_steps=args.warmup_steps, fp16=args.fp16, 
evaluation_strategy="epoch", save_strategy="epoch", save_total_limit=2, logging_dir=f"{args.output_data_dir}/logs", learning_rate=float(args.learning_rate), load_best_model_at_end=True, metric_for_best_model="accuracy", # push to hub parameters push_to_hub=args.push_to_hub, hub_strategy=args.hub_strategy, hub_model_id=args.hub_model_id, hub_token=args.hub_token, ) # create Trainer instance trainer = Trainer( model=model, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset, eval_dataset=test_dataset, tokenizer=tokenizer, ) # train model trainer.train() # evaluate model eval_result = trainer.evaluate(eval_dataset=test_dataset) # save best model, metrics and create model card trainer.create_model_card(model_name=args.hub_model_id) trainer.push_to_hub() # Saves the model to s3 uses os.environ["SM_MODEL_DIR"] to make sure checkpointing works trainer.save_model(os.environ["SM_MODEL_DIR"])
notebooks/sagemaker/14_train_and_push_to_hub/scripts/train.py/0
{ "file_path": "notebooks/sagemaker/14_train_and_push_to_hub/scripts/train.py", "repo_id": "notebooks", "token_count": 1998 }
151
<jupyter_start><jupyter_text>Serverless Inference with Hugging Face's Transformers & Amazon SageMaker Welcome to this getting started guide. We will use the Hugging Face Inference DLCs and Amazon SageMaker Python SDK to create a [Serverless Inference](https://docs.aws.amazon.com/sagemaker/latest/dg/serverless-endpoints.html) endpoint.Amazon SageMaker Serverless Inference is a new capability in SageMaker that enables you to deploy and scale ML models in a Serverless fashion. Serverless endpoints automatically launch compute resources and scale them in and out depending on traffic similar to AWS Lambda.Serverless Inference is ideal for workloads which have idle periods between traffic spurts and can tolerate cold starts. With a pay-per-use model, Serverless Inference is a cost-effective option if you have an infrequent or unpredictable traffic pattern. How it works The following diagram shows the workflow of Serverless Inference and the benefits of using a serverless endpoint.When you create a serverless endpoint, SageMaker provisions and manages the compute resources for you. Then, you can make inference requests to the endpoint and receive model predictions in response. SageMaker scales the compute resources up and down as needed to handle your request traffic, and you only pay for what you use. LimitationsMemory size: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB Concurrent invocations: 50 per region Cold starts: ms to seconds. Can be monitored with the `ModelSetupTime` Cloudwatch Metric _NOTE: You can run this demo in Sagemaker Studio, your local machine, or Sagemaker Notebook Instances_ Development Environment and Permissions Installation<jupyter_code>!pip install sagemaker --upgrade<jupyter_output><empty_output><jupyter_text>Permissions_If you are going to use Sagemaker in a local environment (not SageMaker Studio or Notebook Instances). You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output>Couldn't call 'get_role' to get Role ARN from role name philippschmid to get Role path.<jupyter_text>Create Inference `HuggingFaceModel` for the Serverless Inference EndpointWe use the [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) model running our serverless endpoint. This model is a fine-tune checkpoint of [DistilBERT-base-uncased](https://huggingface.co/distilbert-base-uncased), fine-tuned on SST-2. 
This model reaches an accuracy of 91.3 on the dev set (for comparison, the BERT bert-base-uncased version reaches an accuracy of 92.7).<jupyter_code>from sagemaker.huggingface.model import HuggingFaceModel from sagemaker.serverless import ServerlessInferenceConfig # Hub Model configuration. <https://huggingface.co/models> hub = { 'HF_MODEL_ID':'distilbert-base-uncased-finetuned-sst-2-english', 'HF_TASK':'text-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( env=hub, # configuration for loading model from Hub role=role, # iam role with permissions to create an Endpoint transformers_version="4.26", # transformers version used pytorch_version="1.13", # pytorch version used py_version='py39', # python version used ) # Specify MemorySizeInMB and MaxConcurrency in the serverless config object serverless_config = ServerlessInferenceConfig( memory_size_in_mb=4096, max_concurrency=10, ) # deploy the endpoint predictor = huggingface_model.deploy( serverless_inference_config=serverless_config )<jupyter_output>---<jupyter_text>Request Serverless Inference Endpoint using the `HuggingFacePredictor` The `.deploy()` returns a `HuggingFacePredictor` object which can be used to request inference. This `HuggingFacePredictor` makes it easy to send requests to your endpoint and get the results back. _The first request might have some cold start latency (2-5s)._<jupyter_code>data = { "inputs": "the mesmerizing performances of the leads keep the film grounded and keep the audience riveted .", } res = predictor.predict(data=data) print(res)<jupyter_output>[{'label': 'POSITIVE', 'score': 0.9998838901519775}]<jupyter_text>Clean up<jupyter_code>predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output>
notebooks/sagemaker/19_serverless_inference/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/19_serverless_inference/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 1569 }
152
import base64 import torch from io import BytesIO from diffusers import StableDiffusionPipeline def model_fn(model_dir): # Load stable diffusion and move it to the GPU pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16) pipe = pipe.to("cuda") return pipe def predict_fn(data, pipe): # get prompt & parameters prompt = data.pop("inputs", data) # set valid HP for stable diffusion num_inference_steps = data.pop("num_inference_steps", 50) guidance_scale = data.pop("guidance_scale", 7.5) num_images_per_prompt = data.pop("num_images_per_prompt", 4) # run generation with parameters generated_images = pipe(prompt,num_inference_steps=num_inference_steps,guidance_scale=guidance_scale,num_images_per_prompt=num_images_per_prompt)["images"] # create response encoded_images=[] for image in generated_images: buffered = BytesIO() image.save(buffered, format="JPEG") encoded_images.append(base64.b64encode(buffered.getvalue()).decode()) # create response return {"generated_images": encoded_images}
notebooks/sagemaker/23_stable_diffusion_inference/code/inference.py/0
{ "file_path": "notebooks/sagemaker/23_stable_diffusion_inference/code/inference.py", "repo_id": "notebooks", "token_count": 400 }
153
import os import argparse from transformers import ( AutoModelForCausalLM, AutoTokenizer, set_seed, default_data_collator, ) from datasets import load_from_disk import torch from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer, DonutProcessor, VisionEncoderDecoderModel,VisionEncoderDecoderConfig import shutil import logging import sys import json def parse_arge(): """Parse the arguments.""" parser = argparse.ArgumentParser() # add model id and dataset path argument parser.add_argument( "--model_id", type=str, default="naver-clova-ix/donut-base", help="Model id to use for training.", ) parser.add_argument("--special_tokens", type=str, default=None, help="JSON string of special tokens to add to tokenizer.") parser.add_argument("--dataset_path", type=str, default="lm_dataset", help="Path to dataset.") # add training hyperparameters for epochs, batch size, learning rate, and seed parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for.") parser.add_argument( "--per_device_train_batch_size", type=int, default=1, help="Batch size to use for training.", ) parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate to use for training.") parser.add_argument("--seed", type=int, default=42, help="Seed to use for training.") parser.add_argument( "--gradient_checkpointing", type=bool, default=False, help="Path to deepspeed config file.", ) parser.add_argument( "--bf16", type=bool, default=True if torch.cuda.get_device_capability()[0] == 8 else False, help="Whether to use bf16.", ) args = parser.parse_known_args() return args def training_function(args): # set seed set_seed(args.seed) # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # load datasets train_dataset = load_from_disk(args.dataset_path) image_size = list(torch.tensor(train_dataset[0]["pixel_values"][0]).shape) # height, width logger.info(f"loaded train_dataset length is: {len(train_dataset)}") # Load processor and set up new special tokens processor = DonutProcessor.from_pretrained(args.model_id) # add new special tokens to tokenizer and resize feature extractor special_tokens = args.special_tokens.split(",") processor.tokenizer.add_special_tokens({"additional_special_tokens": special_tokens}) processor.feature_extractor.size = image_size[::-1] # should be (width, height) processor.feature_extractor.do_align_long_axis = False # Load model from huggingface.co config = VisionEncoderDecoderConfig.from_pretrained(args.model_id, use_cache=False if args.gradient_checkpointing else True) model = VisionEncoderDecoderModel.from_pretrained(args.model_id, config=config) # Resize embedding layer to match vocabulary size & adjust our image size and output sequence lengths model.decoder.resize_token_embeddings(len(processor.tokenizer)) model.config.encoder.image_size = image_size model.config.decoder.max_length = len(max(train_dataset["labels"], key=len)) # Add task token for decoder to start model.config.pad_token_id = processor.tokenizer.pad_token_id model.config.decoder_start_token_id = processor.tokenizer.convert_tokens_to_ids(['<s>'])[0] # Arguments for training output_dir = "/tmp" training_args = Seq2SeqTrainingArguments( output_dir=output_dir, num_train_epochs=args.epochs, learning_rate=args.lr, per_device_train_batch_size=args.per_device_train_batch_size, bf16=True, tf32=True, 
gradient_checkpointing=args.gradient_checkpointing, logging_steps=10, save_total_limit=1, evaluation_strategy="no", save_strategy="epoch", ) # Create Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset, ) # Start training trainer.train() # save model and processor trainer.model.save_pretrained("/opt/ml/model/") processor.save_pretrained("/opt/ml/model/") # copy inference script os.makedirs("/opt/ml/model/code", exist_ok=True) shutil.copyfile( os.path.join(os.path.dirname(__file__), "inference.py"), "/opt/ml/model/code/inference.py", ) def main(): args, _ = parse_arge() training_function(args) if __name__ == "__main__": main()
notebooks/sagemaker/26_document_ai_donut/scripts/train.py/0
{ "file_path": "notebooks/sagemaker/26_document_ai_donut/scripts/train.py", "repo_id": "notebooks", "token_count": 1878 }
154
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # IA3 This conceptual guide gives a brief overview of [IA3](https://arxiv.org/abs/2205.05638), a parameter-efficient fine tuning technique that is intended to improve over [LoRA](./lora). To make fine-tuning more efficient, IA3 (Infused Adapter by Inhibiting and Amplifying Inner Activations) rescales inner activations with learned vectors. These learned vectors are injected in the attention and feedforward modules in a typical transformer-based architecture. These learned vectors are the only trainable parameters during fine-tuning, and thus the original weights remain frozen. Dealing with learned vectors (as opposed to learned low-rank updates to a weight matrix like LoRA) keeps the number of trainable parameters much smaller. Being similar to LoRA, IA3 carries many of the same advantages: * IA3 makes fine-tuning more efficient by drastically reducing the number of trainable parameters. (For T0, an IA3 model only has about 0.01% trainable parameters, while even LoRA has > 0.1%) * The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable IA3 models for various downstream tasks built on top of them. * Performance of models fine-tuned using IA3 is comparable to the performance of fully fine-tuned models. * IA3 does not add any inference latency because adapter weights can be merged with the base model. In principle, IA3 can be applied to any subset of weight matrices in a neural network to reduce the number of trainable parameters. Following the authors' implementation, IA3 weights are added to the key, value and feedforward layers of a Transformer model. To be specific, for transformer models, IA3 weights are added to the outputs of key and value layers, and to the input of the second feedforward layer in each transformer block. Given the target layers for injecting IA3 parameters, the number of trainable parameters can be determined based on the size of the weight matrices. ## Common IA3 parameters in PEFT As with other methods supported by PEFT, to fine-tune a model using IA3, you need to: 1. Instantiate a base model. 2. Create a configuration (`IA3Config`) where you define IA3-specific parameters. 3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`. 4. Train the `PeftModel` as you normally would train the base model. `IA3Config` allows you to control how IA3 is applied to the base model through the following parameters: - `target_modules`: The modules (for example, attention blocks) to apply the IA3 vectors. - `feedforward_modules`: The list of modules to be treated as feedforward layers in `target_modules`. While learned vectors are multiplied with the output activation for attention blocks, the vectors are multiplied with the input for classic feedforward layers. 
Note that `feedforward_modules` must be a subset of `target_modules`. - `modules_to_save`: List of modules apart from IA3 layers to be set as trainable and saved in the final checkpoint. These typically include model's custom head that is randomly initialized for the fine-tuning task. ## Example Usage For the task of sequence classification, one can initialize the IA3 config for a Llama model as follows: ```py peft_config = IA3Config( task_type=TaskType.SEQ_CLS, target_modules=["k_proj", "v_proj", "down_proj"], feedforward_modules=["down_proj"] ) ```
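As a quick sketch of how this configuration is typically applied, the snippet below wraps a base model with `get_peft_model`. The base checkpoint id is a placeholder and the number of labels is an assumption for illustration only:

```py
from transformers import AutoModelForSequenceClassification
from peft import IA3Config, TaskType, get_peft_model

# placeholder model id: substitute the Llama checkpoint you are fine-tuning
model = AutoModelForSequenceClassification.from_pretrained("path/to/llama-checkpoint", num_labels=2)

peft_config = IA3Config(
    task_type=TaskType.SEQ_CLS,
    target_modules=["k_proj", "v_proj", "down_proj"],
    feedforward_modules=["down_proj"],
)

model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # only the IA3 vectors (plus the classification head) are trainable
```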
peft/docs/source/conceptual_guides/ia3.md/0
{ "file_path": "peft/docs/source/conceptual_guides/ia3.md", "repo_id": "peft", "token_count": 1030 }
155
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # int8 training for automatic speech recognition Quantization reduces the precision of floating point data types, decreasing the memory required to store model weights. However, quantization degrades inference performance because you lose information when you reduce the precision. 8-bit or `int8` quantization uses only a quarter precision, but it does not degrade performance because it doesn't just drop the bits or data. Instead, `int8` quantization *rounds* from one data type to another. <Tip> 💡 Read the [LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale](https://arxiv.org/abs/2208.07339) paper to learn more, or you can take a look at the corresponding [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration) for a gentler introduction. </Tip> This guide will show you how to train a [`openai/whisper-large-v2`](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition (ASR) using a combination of `int8` quantization and LoRA. You'll train Whisper for multilingual ASR on Marathi from the [Common Voice 11.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) dataset. Before you start, make sure you have all the necessary libraries installed: ```bash !pip install -q peft transformers datasets accelerate evaluate jiwer bitsandbytes ``` ## Setup Let's take care of some of the setup first so you can start training faster later. Set the `CUDA_VISIBLE_DEVICES` to `0` to use the first GPU on your machine. Then you can specify the model name (either a Hub model repository id or a path to a directory containing the model), language and language abbreviation to train on, the task type, and the dataset name: ```py import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" model_name_or_path = "openai/whisper-large-v2" language = "Marathi" language_abbr = "mr" task = "transcribe" dataset_name = "mozilla-foundation/common_voice_11_0" ``` You can also log in to your Hugging Face account to save and share your trained model on the Hub if you'd like: ```py from huggingface_hub import notebook_login notebook_login() ``` ## Load dataset and metric The [Common Voice 11.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) dataset contains many hours of recorded speech in many different languages. This guide uses the [Marathi](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/mr/train) language as an example, but feel free to use any other language you're interested in. Initialize a [`~datasets.DatasetDict`] structure, and load the [`train`] (load both the `train+validation` split into `train`) and [`test`] splits from the dataset into it: ```py from datasets import load_dataset from datasets import load_dataset, DatasetDict common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, language_abbr, split="train+validation", use_auth_token=True) common_voice["test"] = load_dataset(dataset_name, language_abbr, split="test", use_auth_token=True) common_voice["train"][0] ``` ## Preprocess dataset Let's prepare the dataset for training. Load a feature extractor, tokenizer, and processor. 
You should also pass the language and task to the tokenizer and processor so they know how to process the inputs: ```py from transformers import AutoFeatureExtractor, AutoTokenizer, AutoProcessor feature_extractor = AutoFeatureExtractor.from_pretrained(model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, language=language, task=task) processor = AutoProcessor.from_pretrained(model_name_or_path, language=language, task=task) ``` You'll only be training on the `sentence` and `audio` columns, so you can remove the rest of the metadata with [`~datasets.Dataset.remove_columns`]: ```py common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) common_voice["train"][0] { "audio": { "path": "/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3", "array": array( [1.13686838e-13, -1.42108547e-13, -1.98951966e-13, ..., 4.83472422e-06, 3.54798703e-06, 1.63231743e-06] ), "sampling_rate": 48000, }, "sentence": "आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.", } ``` If you look at the `sampling_rate`, you'll see the audio was sampled at 48kHz. The Whisper model was pretrained on audio inputs at 16kHZ which means you'll need to downsample the audio inputs to match what the model was pretrained on. Downsample the audio by using the [`~datasets.Dataset.cast_column`] method on the `audio` column, and set the `sampling_rate` to 16kHz. The audio input is resampled on the fly the next time you call it: ```py from datasets import Audio common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000)) common_voice["train"][0] { "audio": { "path": "/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3", "array": array( [-3.06954462e-12, -3.63797881e-12, -4.54747351e-12, ..., -7.74800901e-06, -1.74738125e-06, 4.36312439e-06] ), "sampling_rate": 16000, }, "sentence": "आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.", } ``` Once you've cleaned up the dataset, you can write a function to generate the correct model inputs. The function should: 1. Resample the audio inputs to 16kHZ by loading the `audio` column. 2. Compute the input features from the audio `array` using the feature extractor. 3. Tokenize the `sentence` column to the input labels. ```py def prepare_dataset(batch): audio = batch["audio"] batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch ``` Apply the `prepare_dataset` function to the dataset with the [`~datasets.Dataset.map`] function, and set the `num_proc` argument to `2` to enable multiprocessing (if `map` hangs, then set `num_proc=1`): ```py common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2) ``` Finally, create a `DataCollator` class to pad the labels in each batch to the maximum length, and replace padding with `-100` so they're ignored by the loss function. 
Then initialize an instance of the data collator:

```py
import torch

from dataclasses import dataclass
from typing import Any, Dict, List, Union


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        label_features = [{"input_ids": feature["labels"]} for feature in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels

        return batch


data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)
```

## Train

Now that the dataset is ready, you can turn your attention to the model. Start by loading the pretrained [`openai/whisper-large-v2`](https://huggingface.co/openai/whisper-large-v2) model from [`~transformers.AutoModelForSpeechSeq2Seq`], and make sure to set the [`~transformers.BitsAndBytesConfig.load_in_8bit`] argument to `True` to enable `int8` quantization. The `device_map="auto"` argument automatically determines how to load and store the model weights:

```py
from transformers import AutoModelForSpeechSeq2Seq

model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name_or_path, load_in_8bit=True, device_map="auto")
```

You should configure `forced_decoder_ids=None` because no tokens need to be forced at the start of generation, and you won't need to suppress any tokens during generation either:

```py
model.config.forced_decoder_ids = None
model.config.suppress_tokens = []
```

To get the model ready for `int8` training, use the utility function [`prepare_model_for_int8_training`](https://github.com/huggingface/peft/blob/34027fe813756897767b9a6f19ae7f1c4c7b418c/src/peft/utils/other.py#L35) to handle the following:

- casts all the non-`int8` modules to full precision (`fp32`) for stability
- adds a forward hook to the input embedding layer to calculate the gradients of the input hidden states
- enables gradient checkpointing for more memory-efficient training

```py
from peft import prepare_model_for_int8_training

model = prepare_model_for_int8_training(model)
```

Let's also apply LoRA to the training to make it even more efficient. Load a [`~peft.LoraConfig`] and configure the following parameters:

- `r`, the dimension of the low-rank matrices
- `lora_alpha`, scaling factor for the weight matrices
- `target_modules`, the name of the attention matrices to apply LoRA to (`q_proj` and `v_proj`, or query and value in this case)
- `lora_dropout`, dropout probability of the LoRA layers
- `bias`, set to `none`

<Tip>

💡 The weight matrix is scaled by `lora_alpha/r`, and a higher `lora_alpha` value assigns more weight to the LoRA activations. For performance, we recommend setting `bias` to `none` first, and then `lora_only`, before trying `all`.

</Tip>

```py
from peft import LoraConfig, get_peft_model

config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none")
```

After you set up the [`~peft.LoraConfig`], wrap it and the base model with the [`get_peft_model`] function to create a [`PeftModel`]. Print out the number of trainable parameters to see how much more efficient LoRA is compared to fully training the model!
```py model = get_peft_model(model, config) model.print_trainable_parameters() "trainable params: 15728640 || all params: 1559033600 || trainable%: 1.0088711365810203" ``` Now you're ready to define some training hyperparameters in the [`~transformers.Seq2SeqTrainingArguments`] class, such as where to save the model to, batch size, learning rate, and number of epochs to train for. The [`PeftModel`] doesn't have the same signature as the base model, so you'll need to explicitly set `remove_unused_columns=False` and `label_names=["labels"]`. ```py from transformers import Seq2SeqTrainingArguments training_args = Seq2SeqTrainingArguments( output_dir="your-name/int8-whisper-large-v2-asr", per_device_train_batch_size=8, gradient_accumulation_steps=1, learning_rate=1e-3, warmup_steps=50, num_train_epochs=3, evaluation_strategy="epoch", fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, label_names=["labels"], ) ``` It is also a good idea to write a custom [`~transformers.TrainerCallback`] to save model checkpoints during training: ```py from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "adapter_model") kwargs["model"].save_pretrained(peft_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) return control ``` Pass the `Seq2SeqTrainingArguments`, model, datasets, data collator, tokenizer, and callback to the [`~transformers.Seq2SeqTrainer`]. You can optionally set `model.config.use_cache = False` to silence any warnings. Once everything is ready, call [`~transformers.Trainer.train`] to start training! ```py from transformers import Seq2SeqTrainer, TrainerCallback, Seq2SeqTrainingArguments, TrainerState, TrainerControl trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], eval_dataset=common_voice["test"], data_collator=data_collator, tokenizer=processor.feature_extractor, callbacks=[SavePeftModelCallback], ) model.config.use_cache = False trainer.train() ``` ## Evaluate [Word error rate](https://huggingface.co/spaces/evaluate-metric/wer) (WER) is a common metric for evaluating ASR models. Load the WER metric from 🤗 Evaluate: ```py import evaluate metric = evaluate.load("wer") ``` Write a loop to evaluate the model performance. Set the model to evaluation mode first, and write the loop with [`torch.cuda.amp.autocast()`](https://pytorch.org/docs/stable/amp.html) because `int8` training requires autocasting. Then, pass a batch of examples to the model to evaluate. 
Get the decoded predictions and labels, and add them as a batch to the WER metric before calling `compute` to get the final WER score:

```py
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import gc

eval_dataloader = DataLoader(common_voice["test"], batch_size=8, collate_fn=data_collator)

model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    with torch.cuda.amp.autocast():
        with torch.no_grad():
            generated_tokens = (
                model.generate(
                    input_features=batch["input_features"].to("cuda"),
                    decoder_input_ids=batch["labels"][:, :4].to("cuda"),
                    max_new_tokens=255,
                )
                .cpu()
                .numpy()
            )
            labels = batch["labels"].cpu().numpy()
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
            metric.add_batch(
                predictions=decoded_preds,
                references=decoded_labels,
            )
    del generated_tokens, labels, batch
    gc.collect()
wer = 100 * metric.compute()
print(f"{wer=}")
```

## Share model

Once you're happy with your results, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method:

```py
model.push_to_hub("your-name/int8-whisper-large-v2-asr")
```

## Inference

Let's test the model out now!

Instantiate the model configuration from [`PeftConfig`], and from here, you can use the configuration to load the base model, the [`PeftModel`], tokenizer, processor, and feature extractor. Remember to define the `language` and `task` in the tokenizer, processor, and `forced_decoder_ids`:

```py
from peft import PeftModel, PeftConfig
from transformers import WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor

peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab"
language = "Marathi"
task = "transcribe"
peft_config = PeftConfig.from_pretrained(peft_model_id)
model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto"
)

model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
feature_extractor = processor.feature_extractor
forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)
```

Load an audio sample (you can listen to it in the [Dataset Preview](https://huggingface.co/datasets/stevhliu/dummy)) to transcribe, and instantiate the [`~transformers.AutomaticSpeechRecognitionPipeline`]:

```py
from transformers import AutomaticSpeechRecognitionPipeline

audio = "https://huggingface.co/datasets/stevhliu/dummy/resolve/main/mrt_01523_00028548203.wav"
pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
```

Then use the pipeline with autocast as a context manager on the audio sample:

```py
with torch.cuda.amp.autocast():
    text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"]
text
"मी तुमच्यासाठी काही करू शकतो का?"
```
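For longer recordings, you may want to wrap the pipeline call in a small helper that also chunks the audio. The snippet below is just a sketch: the helper name and the 30-second chunk length are arbitrary choices, and `chunk_length_s` is the pipeline's standard chunking argument.

```py
def transcribe(audio_path_or_url: str) -> str:
    with torch.cuda.amp.autocast():
        output = pipe(
            audio_path_or_url,
            chunk_length_s=30,  # split long audio into 30s chunks before decoding
            generate_kwargs={"forced_decoder_ids": forced_decoder_ids},
            max_new_tokens=255,
        )
    return output["text"]


transcribe(audio)
```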
peft/docs/source/task_guides/int8-asr.md/0
{ "file_path": "peft/docs/source/task_guides/int8-asr.md", "repo_id": "peft", "token_count": 6115 }
156
import os import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup from peft import AdaLoraConfig, PeftConfig, PeftModel, TaskType, get_peft_model os.environ["TOKENIZERS_PARALLELISM"] = "false" device = "cuda" model_name_or_path = "facebook/bart-base" tokenizer_name_or_path = "facebook/bart-base" checkpoint_name = "financial_sentiment_analysis_lora_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 128 lr = 1e-3 num_epochs = 8 batch_size = 8 # creating model peft_config = AdaLoraConfig( init_r=12, target_r=8, beta1=0.85, beta2=0.85, tinit=200, tfinal=1000, deltaT=10, lora_alpha=32, lora_dropout=0.1, task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) # data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) # optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) model.base_model.peft_config.total_step = len(train_dataloader) * num_epochs # training and evaluation model = model.to(device) global_step = 0 for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() # Update the importance of low-rank matrices # and allocate the budget accordingly. 
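        # AdaLoRA starts every adapted matrix at rank `init_r` and prunes singular
        # values based on their estimated importance until the average rank reaches
        # `target_r`. `tinit` delays pruning at the start of training and `tfinal`
        # freezes the allocation for the final steps, which is why the call below
        # needs the current `global_step`.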
        model.base_model.update_and_allocate(global_step)
        optimizer.zero_grad()
        global_step += 1

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")


# print accuracy
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")


# saving model
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
model.save_pretrained(peft_model_id)


ckpt = f"{peft_model_id}/adapter_model.bin"
# get_ipython().system('du -h $ckpt')


peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)


model.eval()
i = 13
inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt")
print(dataset["validation"][text_column][i])
print(inputs)

with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(outputs)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
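

# Optional: the same reload can be done in one step with PEFT's Auto classes.
# This is a sketch; it assumes a PEFT release that ships `AutoPeftModelForSeq2SeqLM`,
# which resolves the base model from the adapter config automatically.
from peft import AutoPeftModelForSeq2SeqLM

auto_model = AutoPeftModelForSeq2SeqLM.from_pretrained(peft_model_id)
auto_model.eval()
with torch.no_grad():
    outputs = auto_model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))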
peft/examples/conditional_generation/peft_adalora_seq2seq.py/0
{ "file_path": "peft/examples/conditional_generation/peft_adalora_seq2seq.py", "repo_id": "peft", "token_count": 2250 }
157
<jupyter_start><jupyter_text>Using PEFT with timm `peft` allows us to train any model with LoRA as long as the layer type is supported. Since `Conv2D` is one of the supported layer types, it makes sense to test it on image models.In this short notebook, we will demonstrate this with an image classification task using [`timm`](https://huggingface.co/docs/timm/index). Imports Make sure that you have the latest version of `peft` installed. To ensure that, run this in your Python environment: python -m pip install --upgrade peft Also, ensure that `timm` is installed: python -m pip install --upgrade timm<jupyter_code>import timm import torch from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform import peft from datasets import load_dataset torch.manual_seed(0)<jupyter_output><empty_output><jupyter_text>Loading the pre-trained base model We use a small pretrained `timm` model, `PoolFormer`. Find more info on its [model card](https://huggingface.co/timm/poolformer_m36.sail_in1k).<jupyter_code>model_id_timm = "timm/poolformer_m36.sail_in1k"<jupyter_output><empty_output><jupyter_text>We tell `timm` that we deal with 3 classes, to ensure that the classification layer has the correct size.<jupyter_code>model = timm.create_model(model_id_timm, pretrained=True, num_classes=3)<jupyter_output><empty_output><jupyter_text>These are the transformations steps necessary to process the image.<jupyter_code>transform = create_transform(**resolve_data_config(model.pretrained_cfg, model=model))<jupyter_output><empty_output><jupyter_text>Data For this exercise, we use the "beans" dataset. More details on the dataset can be found on [its datasets page](https://huggingface.co/datasets/beans). 
For our purposes, what's important is that we have image inputs and the target we're trying to predict is one of three classes for each image.<jupyter_code>ds = load_dataset("beans") ds_train = ds["train"] ds_valid = ds["validation"] ds_train[0]["image"]<jupyter_output><empty_output><jupyter_text>We define a small processing function which is responsible for loading and transforming the images, as well as extracting the labels.<jupyter_code>def process(batch): x = torch.cat([transform(img).unsqueeze(0) for img in batch["image"]]) y = torch.tensor(batch["labels"]) return {"x": x, "y": y} ds_train.set_transform(process) ds_valid.set_transform(process) train_loader = torch.utils.data.DataLoader(ds_train, batch_size=32) valid_loader = torch.utils.data.DataLoader(ds_valid, batch_size=32)<jupyter_output><empty_output><jupyter_text>Training This is just a function that performs the train loop, nothing fancy happening.<jupyter_code>def train(model, optimizer, criterion, train_dataloader, valid_dataloader, epochs): for epoch in range(epochs): model.train() train_loss = 0 for batch in train_dataloader: xb, yb = batch["x"], batch["y"] xb, yb = xb.to(device), yb.to(device) outputs = model(xb) lsm = torch.nn.functional.log_softmax(outputs, dim=-1) loss = criterion(lsm, yb) train_loss += loss.detach().float() loss.backward() optimizer.step() optimizer.zero_grad() model.eval() valid_loss = 0 correct = 0 n_total = 0 for batch in valid_dataloader: xb, yb = batch["x"], batch["y"] xb, yb = xb.to(device), yb.to(device) with torch.no_grad(): outputs = model(xb) lsm = torch.nn.functional.log_softmax(outputs, dim=-1) loss = criterion(lsm, yb) valid_loss += loss.detach().float() correct += (outputs.argmax(-1) == yb).sum().item() n_total += len(yb) train_loss_total = (train_loss / len(train_dataloader)).item() valid_loss_total = (valid_loss / len(valid_dataloader)).item() valid_acc_total = correct / n_total print(f"{epoch=:<2} {train_loss_total=:.4f} {valid_loss_total=:.4f} {valid_acc_total=:.4f}")<jupyter_output><empty_output><jupyter_text>Selecting which layers to fine-tune with LoRA Let's take a look at the layers of our model. We only print the first 30, since there are quite a few:<jupyter_code>[(n, type(m)) for n, m in model.named_modules()][:30]<jupyter_output><empty_output><jupyter_text>Most of these layers are not good targets for LoRA, but we see a couple that should interest us. Their names are `'stages.0.blocks.0.mlp.fc1'`, etc. With a bit of regex, we can match them easily.Also, we should inspect the name of the classification layer, since we want to train that one too!<jupyter_code>[(n, type(m)) for n, m in model.named_modules()][-5:]<jupyter_output><empty_output><jupyter_text>config = peft.LoraConfig( r=8, target_modules=r".*\.mlp\.fc\d|head\.fc", ) Okay, this gives us all the information we need to fine-tune this model. With a bit of regex, we match the convolutional layers that should be targeted for LoRA. We also want to train the classification layer `'head.fc'` (without LoRA), so we add it to the `modules_to_save`.<jupyter_code>config = peft.LoraConfig(r=8, target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"])<jupyter_output><empty_output><jupyter_text>Finally, let's create the `peft` model, the optimizer and criterion, and we can get started. 
As shown below, less than 2% of the model's total parameters are updated thanks to `peft`.<jupyter_code>device = "cuda" if torch.cuda.is_available() else "cpu" peft_model = peft.get_peft_model(model, config).to(device) optimizer = torch.optim.Adam(peft_model.parameters(), lr=2e-4) criterion = torch.nn.CrossEntropyLoss() peft_model.print_trainable_parameters() %time train(peft_model, optimizer, criterion, train_loader, valid_dataloader=valid_loader, epochs=10)<jupyter_output>epoch=0 train_loss_total=1.2999 valid_loss_total=1.0624 valid_acc_total=0.4436 epoch=1 train_loss_total=1.0200 valid_loss_total=0.8906 valid_acc_total=0.7594 epoch=2 train_loss_total=0.8874 valid_loss_total=0.6894 valid_acc_total=0.8045 epoch=3 train_loss_total=0.7440 valid_loss_total=0.4797 valid_acc_total=0.8045 epoch=4 train_loss_total=0.6025 valid_loss_total=0.3419 valid_acc_total=0.8120 epoch=5 train_loss_total=0.4820 valid_loss_total=0.2589 valid_acc_total=0.8421 epoch=6 train_loss_total=0.3567 valid_loss_total=0.2101 valid_acc_total=0.8722 epoch=7 train_loss_total=0.2835 valid_loss_total=0.1385 valid_acc_total=0.9098 epoch=8 train_loss_total=0.1815 valid_loss_total=0.1108 valid_acc_total=0.9474 epoch=9 train_loss_total=0.1341 valid_loss_total=0.0785 valid_acc_total=0.9699 CPU times: user 4min 3s, sys: 36.3 s, total: 4min 40s Wall time: 3min 32s<jupyter_text>We get an accuracy of ~0.97, despite only training a tiny amount of parameters. That's a really nice result. Sharing the model through Hugging Face Hub Pushing the model to Hugging Face Hub If we want to share the fine-tuned weights with the world, we can upload them to Hugging Face Hub like this:<jupyter_code>user = "BenjaminB" # put your user name here model_name = "peft-lora-with-timm-model" model_id = f"{user}/{model_name}" peft_model.push_to_hub(model_id);<jupyter_output><empty_output><jupyter_text>As we can see, the adapter size is only 4.3 MB. The original model was 225 MB. That's a very big saving. Loading the model from HF Hub Now, it only takes one step to load the model from HF Hub. To do this, we can use `PeftModel.from_pretrained`, passing our base model and the model ID:<jupyter_code>base_model = timm.create_model(model_id_timm, pretrained=True, num_classes=3) loaded = peft.PeftModel.from_pretrained(base_model, model_id) x = ds_train[:1]["x"] y_peft = peft_model(x.to(device)) y_loaded = loaded(x) torch.allclose(y_peft.cpu(), y_loaded)<jupyter_output><empty_output><jupyter_text>Clean up Finally, as a clean up step, you may want to delete the repo.<jupyter_code>from huggingface_hub import delete_repo delete_repo(model_id)<jupyter_output><empty_output>
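<jupyter_text>As an optional extra (a sketch, not covered above): because LoRA weights can be merged back into the layers they adapt, you could also fold the adapter into the base `timm` model with `merge_and_unload` and ship a plain model with no `peft` dependency at inference time. The fine-tuned classification head (a `modules_to_save` module) should be carried over as-is, and the merged outputs should match the adapter outputs up to small numerical differences.<jupyter_code>merged_model = loaded.merge_and_unload()

with torch.no_grad():
    y_merged = merged_model(x)

torch.allclose(y_loaded, y_merged, atol=1e-5)<jupyter_output><empty_output>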
peft/examples/image_classification/image_classification_timm_peft_lora.ipynb/0
{ "file_path": "peft/examples/image_classification/image_classification_timm_peft_lora.ipynb", "repo_id": "peft", "token_count": 3067 }
158
<jupyter_start><jupyter_code>!pip install -q git+https://github.com/huggingface/transformers.git !pip install -q git+https://github.com/huggingface/peft.git !pip install -q git+https://github.com/huggingface/accelerate.git@main !pip install huggingface_hub !pip install bitsandbytes !pip install SentencePiece import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" from huggingface_hub import notebook_login import torch notebook_login() from peft import PeftModel from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig model_name = "decapoda-research/llama-7b-hf" tokenizer = LlamaTokenizer.from_pretrained(model_name) model = LlamaForCausalLM.from_pretrained(model_name, load_in_8bit=True, device_map="auto", use_auth_token=True) %%time model = PeftModel.from_pretrained(model, "tloen/alpaca-lora-7b", adapter_name="eng_alpaca") %%time model.load_adapter("22h/cabrita-lora-v0-1", adapter_name="portuguese_alpaca") model model.to("cuda") import torch device = "cuda" def generate_prompt(instruction, input=None): if input: return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Input: {input} ### Response:""" else: return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response:""" def evaluate( instruction, input=None, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, max_new_tokens=256, **kwargs, ): prompt = generate_prompt(instruction, input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=3, **kwargs, ) with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens, ) s = generation_output.sequences[0] output = tokenizer.decode(s) return output.split("### Response:")[1].strip() %%time model.set_adapter("eng_alpaca") instruction = "Tell me about alpacas." print(evaluate(instruction)) %%time model.set_adapter("portuguese_alpaca") instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa." print(evaluate(instruction)) with model.disable_adapter(): instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa." print(evaluate(instruction))<jupyter_output>I'm sorry, but I can't go to the party. I'm sick. I have a cold. I don't feel well. I need to stay at home and rest. I have a lot of homework to do. My dog ate my homework. My homework is too hard. I didn't have time to do it. It's too late. I forgot about it. My parents won't let me go. My parents are out of town. They're on vacation. They have to work. They are sick. They need to take care of my brother. They're not home. They went to the grocery store. They took the car to the mechanic. They had to go to a meeting. They were in a hurry. They forgot about me. Their car broke down. Their car ran out of gas. They got a flat tire. They couldn't find a parking space. They didn' t have enough money. They lost their wallet. It's raining. The roads are icy. There's a blizzard. There are too many cars on the road. There was an accident.
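<jupyter_text>Since both adapters stay loaded on the same base model, a small loop (just a sketch) can compare their answers to the same instruction. `model.peft_config` keeps one entry per loaded adapter name, and `set_adapter` switches between them in place.<jupyter_code>instruction = "Tell me about alpacas."

for adapter_name in list(model.peft_config.keys()):
    model.set_adapter(adapter_name)
    print(f"=== {adapter_name} ===")
    print(evaluate(instruction))<jupyter_output><empty_output>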
peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb/0
{ "file_path": "peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb", "repo_id": "peft", "token_count": 1328 }
159
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import collections import inspect import os import warnings from contextlib import contextmanager from copy import deepcopy from typing import Any, Dict, List, Optional, Union import packaging.version import torch import transformers from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import ModelCard, ModelCardData, hf_hub_download from safetensors.torch import save_file as safe_save_file from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from . import __version__ from .config import PeftConfig from .tuners import ( AdaLoraModel, AdaptionPromptModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MultitaskPromptEmbedding, OFTModel, PolyModel, PrefixEncoder, PromptEmbedding, PromptEncoder, ) from .utils import ( SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftType, TaskType, _get_batch_size, _prepare_prompt_learning_config, _set_adapter, _set_trainable, get_peft_model_state_dict, id_tensor_storage, infer_device, load_peft_weights, set_peft_model_state_dict, shift_tokens_right, ) PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.IA3: IA3Model, PeftType.OFT: OFTModel, PeftType.POLY: PolyModel, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. **Attributes**: - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. 
- **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() self.modules_to_save = None self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self._is_prompt_learning = peft_config.is_prompt_learning if self._is_prompt_learning: self._peft_config = {adapter_name: peft_config} self.base_model = model self.add_adapter(adapter_name, peft_config) else: self._peft_config = None cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) self.set_additional_trainable_modules(peft_config, adapter_name) if getattr(model, "is_gradient_checkpointing", True): model = self._prepare_model_for_gradient_checkpointing(model) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> Dict[str, PeftConfig]: if self._is_prompt_learning: return self._peft_config return self.base_model.peft_config @property def active_adapters(self) -> list[str]: try: adapters = self.base_model.active_adapters except AttributeError: adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] return adapters @peft_config.setter def peft_config(self, value: Dict[str, PeftConfig]): if self._is_prompt_learning: self._peft_config = value else: self.base_model.peft_config = value def save_pretrained( self, save_directory: str, safe_serialization: bool = True, selected_adapters: Optional[List[str]] = None, save_embedding_layers: Union[str, bool] = "auto", is_main_process: bool = True, **kwargs: Any, ) -> None: r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). safe_serialization (`bool`, *optional*): Whether to save the adapter files in safetensors format, defaults to `True`. selected_adapters (`List[str]`, *optional*): A list of adapters to be saved. If `None`, will default to all adapters. save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. and automatically sets the boolean flag. This only works for 🤗 transformers models. is_main_process (`bool`, *optional*): Whether the process calling this is the main process or not. Will default to `True`. Will not save the checkpoint if not on the main process, which is important for multi device setups (e.g. DDP). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if selected_adapters is None: selected_adapters = list(self.peft_config.keys()) else: if any( selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters ): raise ValueError( f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" f" {list(self.peft_config.keys())} - got {selected_adapters}." ) if is_main_process: os.makedirs(save_directory, exist_ok=True) self.create_or_update_model_card(save_directory) for adapter_name in selected_adapters: peft_config = self.peft_config[adapter_name] # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name, save_embedding_layers=save_embedding_layers, ) output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) if is_main_process and safe_serialization: # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 # Safetensors does not allow tensor aliasing. # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in output_state_dict.items(): # Sometimes in the state_dict we have non-tensor objects. # e.g. in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} for _, names in shared_ptrs.items(): # Here we just clone the shared tensors to avoid tensor aliasing which is # not supported in safetensors. for shared_tensor_name in names[1:]: output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() safe_save_file( output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}, ) elif is_main_process: torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True if peft_config.task_type is None: # deal with auto mapping base_model_class = self._get_base_model_class( is_prompt_tuning=peft_config.is_prompt_learning, ) parent_library = base_model_class.__module__ auto_mapping_dict = { "base_model_class": base_model_class.__name__, "parent_library": parent_library, } else: auto_mapping_dict = None if is_main_process: peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) peft_config.inference_mode = inference_mode @classmethod def from_pretrained( cls, model: torch.nn.Module, model_id: Union[str, os.PathLike], adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ) -> "PeftModel": r""" Instantiate a PEFT model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. 
Args: model ([`torch.nn.Module`]): The model to be adapted. For 🤗 Transformers models, the model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`]. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuation. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. """ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model def _setup_prompt_encoder(self, adapter_name: str): config = self.peft_config[adapter_name] if not hasattr(self, "prompt_encoder"): self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if transformer_backbone is None: transformer_backbone = self.base_model if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] # the actual unsharded shape is stored in "ds_shape" attribute # special handling is needed in case the model is initialized in 
deepspeed.zero.Init() context or HfDeepSpeedConfig # has been called before # For reference refer to issue: https://github.com/huggingface/peft/issues/996 deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) if value.shape[0] == self.base_model.config.vocab_size or ( deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size ): self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") prompt_encoder = prompt_encoder.to(self.device) self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): r""" Prepares the model for gradient checkpointing if necessary """ if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: """ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning method. """ prompt_encoder = self.prompt_encoder[adapter_name] prompt_tokens = ( self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) ) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) else: prompt_embeddings = prompt_encoder(prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor: """ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = ( self.prompt_tokens[self.active_adapter] .unsqueeze(0) .expand(batch_size, -1) .to(prompt_encoder.embedding.weight.device) ) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) if self.base_model_torch_dtype is not None: past_key_values = past_key_values.to(self.base_model_torch_dtype) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompts = prompt_encoder(prompt_tokens, task_ids) else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def get_nb_trainable_parameters(self) -> tuple[int, int]: r""" Returns the number of trainable parameters and the number of all parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self) -> None: """ Prints the number of trainable parameters in the model. """ trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) def _get_base_model_class(self, is_prompt_tuning=False): """ Returns the base model class. """ if not is_prompt_tuning: return self.base_model.model.__class__ return self.base_model.__class__ @contextmanager def disable_adapter(self): """ Context manager that disables the adapter module. Use this to run inference on the base model. Example: ```py >>> with model.disable_adapter(): ... 
model(inputs) ``` """ try: if self.peft_config[self.active_adapter].is_prompt_learning: # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and # letting the underyling methods deal with it, same as how LoRA does it. old_forward = self.forward self.forward = self.base_model.forward old_prepare_inputs_for_generation = self.prepare_inputs_for_generation self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation else: self.base_model.disable_adapter_layers() yield finally: if self.peft_config[self.active_adapter].is_prompt_learning: self.forward = old_forward self.old_prepare_inputs_for_generation = old_prepare_inputs_for_generation else: self.base_model.enable_adapter_layers() def get_base_model(self) -> torch.nn.Module: """ Returns the base model. """ return ( self.base_model if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY) else self.base_model.model ) def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None: """ Add an adapter to the model based on the passed configuration. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. """ if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." ) try: if peft_config.is_prompt_learning: self.peft_config[adapter_name] = peft_config if hasattr(self.config, "to_dict"): dict_config = self.config.to_dict() else: dict_config = self.config peft_config = _prepare_prompt_learning_config(peft_config, dict_config) self._setup_prompt_encoder(adapter_name) elif peft_config.is_adaption_prompt: self.base_model.add_adapter(adapter_name, peft_config) else: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self.base_model.model, adapter_name) except Exception: # somthing went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) @classmethod def _split_kwargs(cls, kwargs: Dict[str, Any]): _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) hf_hub_download_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: hf_hub_download_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, other_kwargs def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): """ Load a trained adapter into the model. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. 
is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. kwargs: (`optional`): Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. """ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) torch_device = infer_device() if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, **hf_hub_download_kwargs, ) ].from_pretrained( model_id, **hf_hub_download_kwargs, ) if peft_config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) # load the weights into the model load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if self.peft_config[adapter_name].is_prompt_learning: remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default if not is_trainable: self.eval() return load_result def set_adapter(self, adapter_name: str) -> None: """ Sets the active adapter. Only one adapter can be active at a time. Args: adapter_name (`str`): The name of the adapter to be set as active. The adapter must be loaded first. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not self.peft_config[adapter_name].is_prompt_learning: self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) @property def base_model_torch_dtype(self): return getattr(self.base_model, "dtype", None) @property def active_peft_config(self): return self.peft_config[self.active_adapter] def create_or_update_model_card(self, output_dir: str): """ Updates or create model card to include information about peft: 1. Adds `peft` library tag 2. Adds peft version 3. Adds base model info 4. 
Adds quantization information if it was used """ filename = os.path.join(output_dir, "README.md") card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) card.data["library_name"] = "peft" model_config = getattr(self, "config", None) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() if model_config is not None: card.data["base_model"] = model_config["_name_or_path"] lines = card.text.splitlines() quantization_config = None if hasattr(model_config, "quantization_config"): quantization_config = self.config.quantization_config.to_dict() training_config_text = "" quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" # Adds quantization information if it was used if quantization_config is not None: training_config_text += f"\n{quantization_prefix}\n" training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) training_config_text += "\n" training_procedure_heading = "## Training procedure" if quantization_prefix not in lines and bool(training_config_text): if training_procedure_heading in lines: lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) else: lines.append(f"{training_procedure_heading}\n{training_config_text}") # Adds peft version framework_block_heading = "### Framework versions" if f"- PEFT {__version__}" not in lines: if framework_block_heading in lines: lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") else: lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") card.text = "\n".join(lines) card.save(filename) class PeftModelForSequenceClassification(PeftModel): """ Peft model for sequence classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForSequenceClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "SEQ_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForSequenceClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: pooled_output = self.base_model.dropout(pooled_output) logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.base_model.num_labels == 1: self.config.problem_type = "regression" elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.base_model.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForCausalLM(PeftModel): """ Peft model for causal language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. 
Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModelForCausalLM, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "CAUSAL_LM", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 1280, ... "num_transformer_submodules": 1, ... "num_attention_heads": 20, ... "num_layers": 36, ... "encoder_hidden_size": 1280, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") >>> peft_model = PeftModelForCausalLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if self.base_model.config.model_type == "mpt": if inputs_embeds is not None: raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds") return self.base_model( input_ids=input_ids, attention_mask=attention_mask, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # concat prompt labels if labels is not None: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def generate(self, *args, **kwargs): self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation if hasattr(self.base_model, "model"): self.base_model.model.generation_config = self.generation_config else: self.base_model.generation_config = self.generation_config try: outputs = self.base_model.generate(*args, **kwargs) except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation return outputs def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format # for some architectures which requires a special fix for prompt tuning etc. # TODO: starting with transformers 4.38, all architectures should support caching. uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0") uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0") transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"] uses_cache = uses_transformers_4_38 or ( uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs ) if peft_config.peft_type == PeftType.POLY: model_kwargs["task_ids"] = task_ids if peft_config.is_prompt_learning: if uses_cache and (model_kwargs["past_key_values"] is not None): # change in the logic of `prepare_inputs_for_generation` makes the below code necessary # In prompt learning methods, past key values are longer when compared to the `input_ids`. # As such only consider the last input ids in the autogressive generation phase. if model_kwargs["past_key_values"][0][0].shape[-2] >= model_kwargs["input_ids"].shape[1]: model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:] if model_kwargs.get("attention_mask", None) is not None: size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device) model_kwargs["attention_mask"] = torch.cat( (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 ) if model_kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") model_kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... "target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( decoder_attention_mask.device ) if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, **kwargs, ) elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) return self.base_model( inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs, ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) decoder_inputs_embeds = self.word_embeddings(decoder_input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) # concat prompt labels if labels is not None: if peft_config.num_transformer_submodules == 1: kwargs["labels"] = labels elif peft_config.num_transformer_submodules == 2: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) if peft_config.num_transformer_submodules == 1: return self.base_model(inputs_embeds=inputs_embeds, **kwargs) elif peft_config.num_transformer_submodules == 2: decoder_inputs_embeds = torch.cat( (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1 ) return self.base_model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs ) def generate(self, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self._prepare_encoder_decoder_kwargs_for_generation ) try: if not peft_config.is_prompt_learning: outputs = self.base_model.generate(**kwargs) else: if "input_ids" not in kwargs: raise ValueError("input_ids must be provided for Peft model generation") if kwargs.get("position_ids", None) is not None: warnings.warn( "Position ids are not supported for 
parameter efficient tuning. Ignoring position ids." ) kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if peft_config.peft_type == PeftType.PREFIX_TUNING: outputs = self.base_model.generate(**kwargs) elif peft_config.peft_type in [ PeftType.PROMPT_TUNING, PeftType.P_TUNING, PeftType.MULTITASK_PROMPT_TUNING, ]: kwargs = deepcopy(kwargs) if "encoder_outputs" in kwargs: del kwargs["encoder_outputs"] warnings.warn( "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it." ) input_ids = kwargs.pop("input_ids") inputs_embeds = self.word_embeddings(input_ids) batch_size = inputs_embeds.shape[0] prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None)) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) kwargs["inputs_embeds"] = inputs_embeds if "attention_mask" in kwargs: prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( kwargs["attention_mask"].device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) return self.base_model.generate(**kwargs) else: raise NotImplementedError except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) return outputs def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if peft_config.peft_type == PeftType.POLY: model_kwargs["task_ids"] = task_ids if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: batch_size = model_kwargs["decoder_input_ids"].shape[0] past_key_values = self.get_prompt(batch_size) model_kwargs["past_key_values"] = past_key_values return model_kwargs class PeftModelForTokenClassification(PeftModel): """ Peft model for token classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForTokenClassification >>> from peft import PeftModelForTokenClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "TOKEN_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ...
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForTokenClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForQuestionAnswering(PeftModel): """ Peft model for extractive question answering. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForQuestionAnswering >>> from peft import PeftModelForQuestionAnswering, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "QUESTION_ANS", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForQuestionAnswering(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"qa_outputs"} else: self.modules_to_save.update({"qa_outputs"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, start_positions=start_positions, end_positions=end_positions, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "start_positions": start_positions, "end_positions": end_positions, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class 
PeftModelForFeatureExtraction(PeftModel): """ Peft model for extracting features/embeddings from transformer models Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. Example: ```py >>> from transformers import AutoModel >>> from peft import PeftModelForFeatureExtraction, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "FEATURE_EXTRACTION", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModel.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForFeatureExtraction(model, peft_config) >>> peft_model.print_trainable_parameters() ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"): super().__init__(model, peft_config, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
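# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# peft_model.py above). It shows how `get_peft_model` routes a LoRA config with
# task_type FEATURE_EXTRACTION to the PeftModelForFeatureExtraction class
# defined above. The model name, target modules and hyperparameters are
# assumptions chosen for illustration, not values taken from this file.
import torch
from transformers import AutoModel, AutoTokenizer

from peft import LoraConfig, TaskType, get_peft_model

base_model = AutoModel.from_pretrained("bert-base-cased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

peft_config = LoraConfig(
    task_type=TaskType.FEATURE_EXTRACTION,  # dispatches to PeftModelForFeatureExtraction
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["query", "value"],  # BERT attention projections
)
peft_model = get_peft_model(base_model, peft_config)
peft_model.print_trainable_parameters()

batch = tokenizer(["parameter-efficient feature extraction"], return_tensors="pt")
with torch.no_grad():
    # forward() above simply delegates to the base model for non-prompt-learning adapters
    features = peft_model(**batch).last_hidden_state  # (batch, seq_len, hidden_dim)
# ---------------------------------------------------------------------------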
peft/src/peft/peft_model.py/0
{ "file_path": "peft/src/peft/peft_model.py", "repo_id": "peft", "token_count": 40197 }
160
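# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the dataset record
# above). PeftModelForCausalLM.generate() temporarily swaps the base model's
# prepare_inputs_for_generation so that the virtual prompt tokens are prepended
# during decoding. A minimal, hedged example with prompt tuning; the model name
# and generation settings are assumptions, not values taken from the record.
from transformers import AutoModelForCausalLM, AutoTokenizer

from peft import PromptTuningConfig, TaskType, get_peft_model

model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
base_model = AutoModelForCausalLM.from_pretrained(model_name)

peft_config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    num_virtual_tokens=20,  # prepended as prompt embeddings by PeftModelForCausalLM
)
peft_model = get_peft_model(base_model, peft_config)
peft_model.print_trainable_parameters()

inputs = tokenizer("Parameter-efficient fine-tuning is", return_tensors="pt")
output_ids = peft_model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
# ---------------------------------------------------------------------------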
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class IA3Config(PeftConfig): """ This is the configuration class to store the configuration of a [`IA3Model`]. Args: target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. feedforward_modules (`Optional[Union[List[str], str]]`): The names of the modules to be treated as feedforward modules, as in the original paper. These modules will have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a name or a subset of names present in `target_modules`. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. modules_to_save (`Optional[List[str]]`): List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint. init_ia3_weights (`bool`): Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is discouraged. """ target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with (IA)³." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you shoud specify the target modules manually." ), }, ) feedforward_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or a regex expression of module names which are feedforward" "For example, ['output.dense']" }, ) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. 
" "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_ia3_weights: bool = field( default=True, metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."}, ) def __post_init__(self): self.peft_type = PeftType.IA3 self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.feedforward_modules = ( set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules ) # check if feedforward_modules is a subset of target_modules. run the check only if both are sets if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set): if not self.feedforward_modules.issubset(self.target_modules): raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
peft/src/peft/tuners/ia3/config.py/0
{ "file_path": "peft/src/peft/tuners/ia3/config.py", "repo_id": "peft", "token_count": 1850 }
161
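# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the dataset record
# above). It builds an IA3Config like the one defined in ia3/config.py and
# applies it with `get_peft_model`. The model name and module lists are
# assumptions for a T5-style architecture; note that `feedforward_modules`
# must be a subset of `target_modules`, as enforced in __post_init__ above.
from transformers import AutoModelForSeq2SeqLM

from peft import IA3Config, TaskType, get_peft_model

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

ia3_config = IA3Config(
    task_type=TaskType.SEQ_2_SEQ_LM,
    target_modules=["k", "v", "wo"],  # attention and feed-forward projections in T5 blocks
    feedforward_modules=["wo"],       # treated as feed-forward: (IA)^3 scales the input
)
peft_model = get_peft_model(base_model, ia3_config)
peft_model.print_trainable_parameters()
# ---------------------------------------------------------------------------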
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import operator import re import warnings from dataclasses import asdict, replace from enum import Enum from functools import reduce from itertools import chain from typing import List, Optional import torch from torch import nn from tqdm import tqdm from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists, onload_layer from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _freeze_adapter, _get_submodules, get_quantization_config, ) from .config import LoraConfig from .gptq import dispatch_gptq from .layer import Conv2d, LoraLayer, dispatch_default from .tp_layer import dispatch_megatron class LoraModel(BaseTuner): """ Creates Low Rank Adapter (LoRA) model from a pretrained transformers model. The method is described in detail in https://arxiv.org/abs/2106.09685. Args: model ([`torch.nn.Module`]): The model to be adapted. config ([`LoraConfig`]): The configuration of the Lora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The Lora model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import LoraModel, LoraConfig >>> config = LoraConfig( ... task_type="SEQ_2_SEQ_LM", ... r=8, ... lora_alpha=32, ... target_modules=["q", "v"], ... lora_dropout=0.01, ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> lora_model = LoraModel(model, config, "default") ``` ```py >>> import transformers >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_int8_training >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"] >>> config = LoraConfig( ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM" ... ) >>> model = transformers.GPTJForCausalLM.from_pretrained( ... "kakaobrain/kogpt", ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b ... pad_token_id=tokenizer.eos_token_id, ... use_cache=False, ... device_map={"": rank}, ... torch_dtype=torch.float16, ... load_in_8bit=True, ... ) >>> model = prepare_model_for_int8_training(model) >>> lora_model = get_peft_model(model, config) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`LoraConfig`]): The configuration of the Lora model. """ prefix: str = "lora_" def __init__(self, model, config, adapter_name) -> None: super().__init__(model, config, adapter_name) def _check_new_adapter_config(self, config: LoraConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. 
""" # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." ) @staticmethod def _check_target_module_exists(lora_config, key): return check_target_module_exists(lora_config, key) def _create_and_replace( self, lora_config, adapter_name, target, target_name, parent, current_key, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys())) target_name_key = next(filter(lambda key: re.match(f".*\.{key}$", current_key), pattern_keys), current_key) r = lora_config.rank_pattern.get(target_name_key, lora_config.r) alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha) kwargs = { "r": r, "lora_alpha": alpha, "lora_dropout": lora_config.lora_dropout, "fan_in_fan_out": lora_config.fan_in_fan_out, "init_lora_weights": lora_config.init_lora_weights, "use_rslora": lora_config.use_rslora, "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), } quantization_config = get_quantization_config(self.model, method="gptq") if quantization_config is not None: kwargs["gptq_quantization_config"] = quantization_config # note: AdaLoraLayer is a subclass of LoraLayer, we need to exclude it from peft.tuners.adalora import AdaLoraLayer if isinstance(target, LoraLayer) and not isinstance(target, AdaLoraLayer): target.update_layer( adapter_name, r, alpha, lora_config.lora_dropout, lora_config.init_lora_weights, lora_config.use_rslora, ) else: new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs) if adapter_name != self.active_adapter: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if (self.prefix in name) or ("ranknum" in name): weight = child.qweight if hasattr(child, "qweight") else child.weight module.to(weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = self.peft_config[active_adapter].bias if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "lora_only": for m in model.modules(): if isinstance(m, LoraLayer) 
and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(lora_config, adapter_name, target, **kwargs): # Collect dispatcher functions to decide what backend to use for the replaced LoRA layer. The order matters, # because the first match is always used. Therefore, the default layers should be checked last. dispatchers = [] # avoid eager bnb import if is_bnb_available(): from .bnb import dispatch_bnb_8bit dispatchers.append(dispatch_bnb_8bit) if is_bnb_4bit_available(): from .bnb import dispatch_bnb_4bit dispatchers.append(dispatch_bnb_4bit) dispatchers.extend([dispatch_gptq, dispatch_megatron, dispatch_default]) new_module = None for dispatcher in dispatchers: new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs) if new_module is not None: # first match wins break if new_module is None: # no module could be matched raise ValueError( f"Target module {target} is not supported. Currently, only the following modules are supported: " "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`." ) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled: bool = True) -> None: for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. """ for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, LoraLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge LORA layers when the model is gptq quantized") key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue with onload_layer(target): if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def add_weighted_adapter( self, adapters, weights, adapter_name, combination_type="svd", svd_rank=None, svd_clamp=None, svd_full_matrices=True, svd_driver=None, ) -> None: """ This method adds a new adapter by merging the given adapters with the given weights. When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM errors. Args: adapters (`list`): List of adapter names to be merged. weights (`list`): List of weights for each adapter. adapter_name (`str`): Name of the new adapter. combination_type (`str`): Type of merging. Can be one of [`svd`, `linear`, `cat`]. When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM errors. svd_rank (`int`, *optional*): Rank of output adapter for svd. If None provided, will use max rank of merging adapters. svd_clamp (`float`, *optional*): A quantile threshold for clamping SVD decomposition output. If None is provided, do not perform clamping. Defaults to None. svd_full_matrices (`bool`, *optional*): Controls whether to compute the full or reduced SVD, and consequently, the shape of the returned tensors U and Vh. Defaults to True. svd_driver (`str`, *optional*): Name of the cuSOLVER method to be used. This keyword argument only works when merging on CUDA. Can be one of [None, `gesvd`, `gesvdj`, `gesvda`]. For more info please refer to `torch.linalg.svd` documentation. Defaults to None. 
""" if adapter_name in list(self.peft_config.keys()): return for adapter in adapters: if adapter not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter} does not exist") # if there is only one adapter, we can only use linear merging combination_type = "linear" if len(adapters) == 1 else combination_type adapters_ranks = [self.peft_config[adapter].r for adapter in adapters] if combination_type == "linear": # all adapters ranks should be same, new rank is just this value if len(set(adapters_ranks)) != 1: raise ValueError("All adapters must have the same r value when using `linear` combination_type") new_rank = adapters_ranks[0] elif combination_type == "cat": # adapters ranks may be different, new rank is sum of all ranks # be careful, because output adapter rank may be really big if mixing a lot of adapters new_rank = sum(adapters_ranks) elif combination_type == "svd": # new rank is the max of all ranks of the adapters if not provided new_rank = svd_rank or max(adapters_ranks) else: raise ValueError(f"Invalid combination_type: {combination_type}") target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters] if not target_module_types: raise ValueError(f"Found no adapter matching the names in {adapters}") if len(set(target_module_types)) > 1: raise ValueError( "all adapter configs should follow the same target modules type. " "Combining adapters with `target_modules` type being a mix of list/set and string is not supported." ) if target_module_types[0] == str: new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters) elif target_module_types[0] == set: new_target_modules = reduce( operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters) ) else: raise TypeError(f"Invalid type {target_module_types[0]} found in target_modules") self.peft_config[adapter_name] = replace( self.peft_config[adapters[0]], r=new_rank, lora_alpha=new_rank, target_modules=new_target_modules, ) self.inject_adapter(self.model, adapter_name) # Do we really need that? 
_freeze_adapter(self.model, adapter_name) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LoraLayer): if adapter_name in target.lora_A: target_lora_A = target.lora_A[adapter_name].weight target_lora_B = target.lora_B[adapter_name].weight elif adapter_name in target.lora_embedding_A: target_lora_A = target.lora_embedding_A[adapter_name] target_lora_B = target.lora_embedding_B[adapter_name] else: continue target_lora_A.data = target_lora_A.data * 0.0 target_lora_B.data = target_lora_B.data * 0.0 if combination_type == "linear": for adapter, weight in zip(adapters, weights): if adapter in target.lora_A: current_adapter_lora_A = target.lora_A[adapter].weight current_adapter_lora_B = target.lora_B[adapter].weight elif adapter in target.lora_embedding_A: current_adapter_lora_A = target.lora_embedding_A[adapter] current_adapter_lora_B = target.lora_embedding_B[adapter] else: continue target_lora_A.data += current_adapter_lora_A.data * math.sqrt(weight) * target.scaling[adapter] target_lora_B.data += current_adapter_lora_B.data * math.sqrt(weight) elif combination_type == "cat": loras_A, loras_B = [], [] for adapter, weight in zip(adapters, weights): if adapter in target.lora_A: current_adapter_lora_A = target.lora_A[adapter].weight current_adapter_lora_B = target.lora_B[adapter].weight elif adapter in target.lora_embedding_A: current_adapter_lora_A = target.lora_embedding_A[adapter] current_adapter_lora_B = target.lora_embedding_B[adapter] else: continue loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter]) loras_B.append(current_adapter_lora_B.data) if len(loras_A) == 0: raise ValueError("No matching LoRAs found. Please raise an issue on Github.") loras_A = torch.cat(loras_A, dim=0) loras_B = torch.cat(loras_B, dim=1) target_lora_A.data[: loras_A.shape[0], :] = loras_A target_lora_B.data[:, : loras_B.shape[1]] = loras_B elif combination_type == "svd": target_lora_A.data, target_lora_B.data = self._svd_weighted_adapter( adapters, weights, new_rank, target, target_lora_A, target_lora_B, svd_clamp, full_matrices=svd_full_matrices, driver=svd_driver, ) def _svd_weighted_adapter( self, adapters, weights, new_rank, target, target_lora_A, target_lora_B, clamp=None, full_matrices=True, driver=None, ): valid_adapters = [] valid_weights = [] for adapter, weight in zip(adapters, weights): if adapter in target.lora_A or adapter in target.lora_embedding_A: valid_adapters.append(adapter) valid_weights.append(weight) # if no valid adapter, nothing to do if len(valid_adapters) == 0: raise ValueError("No matching LoRAs found. 
Please raise an issue on Github.") delta_weight = valid_weights[0] * target.get_delta_weight(valid_adapters[0]) for adapter, weight in zip(valid_adapters[1:], valid_weights[1:]): delta_weight += weight * target.get_delta_weight(adapter) conv2d = isinstance(target, Conv2d) if conv2d: conv2d_1x1 = target.weight.size()[2:4] == (1, 1) if not conv2d_1x1: delta_weight = delta_weight.flatten(start_dim=1) else: delta_weight = delta_weight.squeeze() if hasattr(target, "fan_in_fan_out") and target.fan_in_fan_out: delta_weight = delta_weight.T # based on https://github.com/kohya-ss/sd-scripts/blob/main/networks/svd_merge_lora.py#L114-L131 U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver) U = U[:, :new_rank] S = S[:new_rank] U = U @ torch.diag(S) Vh = Vh[:new_rank, :] if clamp is not None: dist = torch.cat([U.flatten(), Vh.flatten()]) hi_val = torch.quantile(dist, clamp) low_val = -hi_val U = U.clamp(low_val, hi_val) Vh = Vh.clamp(low_val, hi_val) if conv2d: U = U.reshape(target_lora_B.data.shape) Vh = Vh.reshape(target_lora_A.data.shape) return Vh, U def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LoraLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None ) -> torch.nn.Module: r""" This method merges the LoRa layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False)
peft/src/peft/tuners/lora/model.py/0
{ "file_path": "peft/src/peft/tuners/lora/model.py", "repo_id": "peft", "token_count": 12522 }
162
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class PolyConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`PolyModel`]. - [Polytropon (Poly)](https://arxiv.org/abs/2202.13914) - [Multi-Head Routing (MHR)](https://arxiv.org/abs/2211.03831) Args: r (`int`): Attention dimension of each Lora in Poly. target_modules (`Union[List[str],str]`): The names of the modules to apply Poly to. modules_to_save (`List[str]`): List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. init_weights (bool): Whether to perform initialization of Poly weights. poly_type (`Literal["poly"]`): The variant of the Poly module to use. Currently, only "poly" is supported. n_tasks (`int`): The number of tasks in a multitasking scenario. n_skills (`int`): The number of skills (LoRA) in each Poly layer. n_splits (`int`): The number of splits within each LoRA of a Poly layer. A value greater than 1 indicates the use of Multi-Head Routing (MHR). """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with Poly." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the Poly layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) poly_type: Literal["poly"] = field( default="poly", metadata={"help": 'Type of Poly modules to be used. Currently only "poly" is supported.'}, ) n_tasks: int = field( default=1, metadata={"help": "Number of tasks in multitasking scenario."}, ) n_skills: int = field( default=4, metadata={"help": "Number of skills (LoRA) in each Poly layer."}, ) n_splits: int = field( default=1, metadata={"help": "Number of splits within each LoRA of a Poly layer."}, ) def __post_init__(self): self.peft_type = PeftType.POLY self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules )
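
# ---------------------------------------------------------------------------
# Editorial usage sketch -- not part of the original file. It assumes that
# `PolyConfig` and `get_peft_model` are re-exported from the top-level `peft`
# package; the base checkpoint ("t5-small") is illustrative, and the target
# module names are taken from the docstring example above.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    from peft import PolyConfig, get_peft_model

    base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    poly_config = PolyConfig(
        task_type="SEQ_2_SEQ_LM",
        target_modules=["q", "v"],  # per the docstring example above
        r=8,         # rank of each LoRA expert
        n_tasks=4,   # number of tasks being routed over
        n_skills=4,  # number of LoRA experts per Poly layer
        n_splits=1,  # > 1 enables Multi-Head Routing (MHR)
    )
    peft_model = get_peft_model(base_model, poly_config)
    peft_model.print_trainable_parameters()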
peft/src/peft/tuners/poly/config.py/0
{ "file_path": "peft/src/peft/tuners/poly/config.py", "repo_id": "peft", "token_count": 1415 }
163
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import warnings from typing import Optional import torch from huggingface_hub import file_exists, hf_hub_download from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import load_file as safe_load_file from .other import EMBEDDING_LAYER_NAMES, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, infer_device from .peft_types import PeftType def has_valid_embedding_base_layer(layer): """Check if the layer has an embedding base layer""" return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding)) def get_embedding_layer_name(model, layer, is_embedding_in_target_modules): """Get the name of the embedding module for a given layer.""" for name, module in model.named_modules(): if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None): return name return None def get_peft_model_state_dict( model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto" ): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the passed model will be used. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be returned. unwrap_compiled (`bool`, *optional*, defaults to `False`): Whether to unwrap the model if torch.compile was used. save_embedding_layers (`Union[bool, str]`, , *optional*, defaults to `auto`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. Based on it sets the boolean flag. This only works for 🤗 transformers models. 
""" if unwrap_compiled: model = getattr(model, "_orig_mod", model) config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict() if config.peft_type in (PeftType.LORA, PeftType.ADALORA): # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} for k in state_dict: if "lora_" in k: to_return[k] = state_dict[k] bias_name = k.split("lora_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} config.rank_pattern = rank_pattern to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) elif config.peft_type == PeftType.LOHA: to_return = {k: state_dict[k] for k in state_dict if "hada_" in k} elif config.peft_type == PeftType.LOKR: to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k} elif config.peft_type == PeftType.ADAPTION_PROMPT: to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} elif config.is_prompt_learning: to_return = {} if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: if config.inference_mode: prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) to_return["prompt_embeddings"] = prompt_embeddings elif config.peft_type == PeftType.IA3: to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k} elif config.peft_type == PeftType.OFT: to_return = {k: state_dict[k] for k in state_dict if "oft_" in k} elif config.peft_type == PeftType.POLY: to_return = {k: state_dict[k] for k in state_dict if "poly_" in k} else: raise NotImplementedError if getattr(model, "modules_to_save", None) is not None: for key, value in state_dict.items(): if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save): to_return[key.replace("modules_to_save.", "")] = value # check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary is_embedding_in_target_modules = False if ( save_embedding_layers == "auto" and hasattr(config, "target_modules") and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES) ): warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.") save_embedding_layers = is_embedding_in_target_modules = True elif save_embedding_layers == "auto": vocab_size = getattr(getattr(model, "config", None), "vocab_size", None) model_id = getattr(config, "base_model_name_or_path", None) # For some models e.g. 
diffusers the text config file is stored in a subfolder # we need to make sure we can download that config. has_remote_config = False if model_id is not None: try: has_remote_config = file_exists(model_id, "config.json") except (HFValidationError, EntryNotFoundError): warnings.warn( f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified." ) has_remote_config = False # check if the vocab size of the base model is different from the vocab size of the finetuned model if ( vocab_size and model_id and has_remote_config and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size) ): warnings.warn( "Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning." ) save_embedding_layers = True else: save_embedding_layers = False if save_embedding_layers and hasattr(model, "get_input_embeddings"): for layer in [model.get_input_embeddings(), model.get_output_embeddings()]: if not is_embedding_in_target_modules or has_valid_embedding_base_layer(layer): # support from version >= 0.6.2 embedding_module_name = get_embedding_layer_name(model, layer, is_embedding_in_target_modules) if embedding_module_name: to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k}) elif save_embedding_layers: warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.") to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()} return to_return def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"): """ Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. """ config = model.peft_config[adapter_name] state_dict = {} if getattr(model, "modules_to_save", None) is not None: for key, value in peft_model_state_dict.items(): if any(module_name in key for module_name in model.modules_to_save): for module_name in model.modules_to_save: if module_name in key: key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}") break state_dict[key] = value else: state_dict = peft_model_state_dict if config.peft_type in ( PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.IA3, PeftType.OFT, PeftType.POLY, ): peft_model_state_dict = {} parameter_prefix = { PeftType.IA3: "ia3_", PeftType.LORA: "lora_", PeftType.ADALORA: "lora_", PeftType.LOHA: "hada_", PeftType.LOKR: "lokr_", PeftType.OFT: "oft_", PeftType.POLY: "poly_", }[config.peft_type] for k, v in state_dict.items(): if parameter_prefix in k: suffix = k.split(parameter_prefix)[1] if "." 
in suffix: suffix_to_replace = ".".join(suffix.split(".")[1:]) k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") else: k = f"{k}.{adapter_name}" peft_model_state_dict[k] = v else: peft_model_state_dict[k] = v if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: peft_model_state_dict = state_dict else: raise NotImplementedError load_result = model.load_state_dict(peft_model_state_dict, strict=False) if config.is_prompt_learning: model.prompt_encoder[adapter_name].embedding.load_state_dict( {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True ) if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False) return load_result def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. """ path = ( os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if hf_hub_download_kwargs.get("subfolder", None) is not None else model_id ) if device is None: device = infer_device() if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False else: token = hf_hub_download_kwargs.get("token", None) if token is None: token = hf_hub_download_kwargs.get("use_auth_token", None) hub_filename = ( os.path.join(hf_hub_download_kwargs["subfolder"], SAFETENSORS_WEIGHTS_NAME) if hf_hub_download_kwargs.get("subfolder", None) is not None else SAFETENSORS_WEIGHTS_NAME ) has_remote_safetensors_file = file_exists( repo_id=model_id, filename=hub_filename, revision=hf_hub_download_kwargs.get("revision", None), repo_type=hf_hub_download_kwargs.get("repo_type", None), token=token, ) use_safetensors = has_remote_safetensors_file if has_remote_safetensors_file: # Priority 1: load safetensors weights filename = hf_hub_download( model_id, SAFETENSORS_WEIGHTS_NAME, **hf_hub_download_kwargs, ) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) except EntryNotFoundError: raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." ) if use_safetensors: if hasattr(torch.backends, "mps") and (device == torch.device("mps")): adapters_weights = safe_load_file(filename, device="cpu") else: adapters_weights = safe_load_file(filename, device=device) else: adapters_weights = torch.load(filename, map_location=torch.device(device)) return adapters_weights
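
# ---------------------------------------------------------------------------
# Editorial usage sketch -- not part of the original module. It shows a round
# trip through `get_peft_model_state_dict` / `set_peft_model_state_dict` above
# on a tiny custom model; the layer sizes and target module names are
# illustrative only.
if __name__ == "__main__":
    import torch.nn as nn

    from peft import LoraConfig, get_peft_model

    base = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    peft_model = get_peft_model(base, LoraConfig(r=4, target_modules=["0", "2"]))

    adapter_sd = get_peft_model_state_dict(peft_model)               # only LoRA tensors, adapter name stripped
    load_result = set_peft_model_state_dict(peft_model, adapter_sd)  # re-attach them to the "default" adapter
    print(sorted(adapter_sd.keys()))
    print(load_result.unexpected_keys)  # expected to be empty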
peft/src/peft/utils/save_and_load.py/0
{ "file_path": "peft/src/peft/utils/save_and_load.py", "repo_id": "peft", "token_count": 6533 }
164
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from peft import LoraConfig, get_peft_model_state_dict, inject_adapter_in_model from peft.utils import ModulesToSaveWrapper class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding = torch.nn.Embedding(10, 10) self.linear = torch.nn.Linear(10, 10) self.lm_head = torch.nn.Linear(10, 10) def forward(self, input_ids): x = self.embedding(input_ids) x = self.linear(x) x = self.lm_head(x) return x class TestPeft(unittest.TestCase): def setUp(self): self.model = DummyModel() lora_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", target_modules=["linear"], ) self.model = inject_adapter_in_model(lora_config, self.model) def test_inject_adapter_in_model(self): dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]) _ = self.model(dummy_inputs) for name, module in self.model.named_modules(): if name == "linear": self.assertTrue(hasattr(module, "lora_A")) self.assertTrue(hasattr(module, "lora_B")) def test_get_peft_model_state_dict(self): peft_state_dict = get_peft_model_state_dict(self.model) for key in peft_state_dict.keys(): self.assertTrue("lora" in key) def test_modules_to_save(self): self.model = DummyModel() lora_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", target_modules=["linear"], modules_to_save=["embedding"], ) self.model = inject_adapter_in_model(lora_config, self.model) for name, module in self.model.named_modules(): if name == "linear": self.assertTrue(hasattr(module, "lora_A")) self.assertTrue(hasattr(module, "lora_B")) elif name == "embedding": self.assertTrue(isinstance(module, ModulesToSaveWrapper)) state_dict = get_peft_model_state_dict(self.model) self.assertTrue("embedding.weight" in state_dict.keys()) self.assertTrue(hasattr(self.model.embedding, "weight"))
peft/tests/test_low_level_api.py/0
{ "file_path": "peft/tests/test_low_level_api.py", "repo_id": "peft", "token_count": 1311 }
165
#!/usr/bin/env python3 """ Model Benchmark Script An inference and train step benchmark script for timm models. Hacked together by Ross Wightman (https://github.com/rwightman) """ import argparse import csv import json import logging import time from collections import OrderedDict from contextlib import suppress from functools import partial import torch import torch.nn as nn import torch.nn.parallel from timm.data import resolve_data_config from timm.layers import set_fast_norm from timm.models import create_model, is_model, list_models from timm.optim import create_optimizer_v2 from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\ reparameterize_model has_apex = False try: from apex import amp has_apex = True except ImportError: pass has_native_amp = False try: if getattr(torch.cuda.amp, 'autocast') is not None: has_native_amp = True except AttributeError: pass try: from deepspeed.profiling.flops_profiler import get_model_profile has_deepspeed_profiling = True except ImportError as e: has_deepspeed_profiling = False try: from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis has_fvcore_profiling = True except ImportError as e: FlopCountAnalysis = None has_fvcore_profiling = False try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True _logger = logging.getLogger('validate') parser = argparse.ArgumentParser(description='PyTorch Benchmark') # benchmark specific args parser.add_argument('--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark') parser.add_argument('--bench', default='both', type=str, help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'") parser.add_argument('--detail', action='store_true', default=False, help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') parser.add_argument('--no-retry', action='store_true', default=False, help='Do not decay batch size and retry on error.') parser.add_argument('--results-file', default='', type=str, help='Output csv file for validation results (summary)') parser.add_argument('--results-format', default='csv', type=str, help='Format for results file one of (csv, json) (default: csv).') parser.add_argument('--num-warm-iter', default=10, type=int, help='Number of warmup iterations (default: 10)') parser.add_argument('--num-bench-iter', default=40, type=int, help='Number of benchmark iterations (default: 40)') parser.add_argument('--device', default='cuda', type=str, help="device to run benchmark on") # common inference / train args parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', help='model architecture (default: resnet50)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') parser.add_argument('--use-train-size', action='store_true', default=False, help='Run inference at train size, not test-input-size if it exists.') parser.add_argument('--num-classes', type=int, default=None, help='Number classes in dataset') parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages') parser.add_argument('--amp', action='store_true', default=False, help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.') parser.add_argument('--precision', default='float32', type=str, help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)') parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) # codegen (model compilation) options scripting_group = parser.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help="Enable compilation w/ specified backend (default: inductor).") scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help="Enable AOT Autograd optimization.") # train optimizer parameters parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay (default: 0.0001)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. 
One of ("norm", "value", "agc")') # model regularization / loss params that impact model or loss fn parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') def timestamp(sync=False): return time.perf_counter() def cuda_timestamp(sync=False, device=None): if sync: torch.cuda.synchronize(device=device) return time.perf_counter() def count_params(model: nn.Module): return sum([m.numel() for m in model.parameters()]) def resolve_precision(precision: str): assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32') amp_dtype = None # amp disabled model_dtype = torch.float32 data_dtype = torch.float32 if precision == 'amp': amp_dtype = torch.float16 elif precision == 'amp_bfloat16': amp_dtype = torch.bfloat16 elif precision == 'float16': model_dtype = torch.float16 data_dtype = torch.float16 elif precision == 'bfloat16': model_dtype = torch.bfloat16 data_dtype = torch.bfloat16 return amp_dtype, model_dtype, data_dtype def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): _, macs, _ = get_model_profile( model=model, input_shape=(batch_size,) + input_size, # input shape/resolution print_profile=detailed, # prints the model graph with the measured profile attached to each module detailed=detailed, # print the detailed profile warm_up=10, # the number of warm-ups before measuring the time of each module as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k) output_file=None, # path to the output file. If None, the profiler prints to stdout. 
ignore_modules=None) # the list of modules to ignore in the profiling return macs, 0 # no activation count in DS def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): if force_cpu: model = model.to('cpu') device, dtype = next(model.parameters()).device, next(model.parameters()).dtype example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) fca = FlopCountAnalysis(model, example_input) aca = ActivationCountAnalysis(model, example_input) if detailed: fcs = flop_count_str(fca) print(fcs) return fca.total(), aca.total() class BenchmarkRunner: def __init__( self, model_name, detail=False, device='cuda', torchscript=False, torchcompile=None, aot_autograd=False, reparam=False, precision='float32', fuser='', num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs ): self.model_name = model_name self.detail = detail self.device = device self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision) self.channels_last = kwargs.pop('channels_last', False) if self.amp_dtype is not None: self.amp_autocast = partial(torch.cuda.amp.autocast, dtype=self.amp_dtype) else: self.amp_autocast = suppress if fuser: set_jit_fuser(fuser) self.model = create_model( model_name, num_classes=kwargs.pop('num_classes', None), in_chans=3, global_pool=kwargs.pop('gp', 'fast'), scriptable=torchscript, drop_rate=kwargs.pop('drop', 0.), drop_path_rate=kwargs.pop('drop_path', None), drop_block_rate=kwargs.pop('drop_block', None), **kwargs.pop('model_kwargs', {}), ) if reparam: self.model = reparameterize_model(self.model) self.model.to( device=self.device, dtype=self.model_dtype, memory_format=torch.channels_last if self.channels_last else None, ) self.num_classes = self.model.num_classes self.param_count = count_params(self.model) _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size) self.input_size = data_config['input_size'] self.batch_size = kwargs.pop('batch_size', 256) self.compiled = False if torchscript: self.model = torch.jit.script(self.model) self.compiled = True elif torchcompile: assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.' 
torch._dynamo.reset() self.model = torch.compile(self.model, backend=torchcompile) self.compiled = True elif aot_autograd: assert has_functorch, "functorch is needed for --aot-autograd" self.model = memory_efficient_fusion(self.model) self.compiled = True self.example_inputs = None self.num_warm_iter = num_warm_iter self.num_bench_iter = num_bench_iter self.log_freq = num_bench_iter // 5 if 'cuda' in self.device: self.time_fn = partial(cuda_timestamp, device=self.device) else: self.time_fn = timestamp def _init_input(self): self.example_inputs = torch.randn( (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) if self.channels_last: self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) class InferenceBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.eval() def run(self): def _step(): t_step_start = self.time_fn() with self.amp_autocast(): output = self.model(self.example_inputs) t_step_end = self.time_fn(True) return t_step_end - t_step_start _logger.info( f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') with torch.no_grad(): self._init_input() for _ in range(self.num_warm_iter): _step() total_step = 0. num_samples = 0 t_run_start = self.time_fn() for i in range(self.num_bench_iter): delta_fwd = _step() total_step += delta_fwd num_samples += self.batch_size num_steps = i + 1 if num_steps % self.log_freq == 0: _logger.info( f"Infer [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_end = self.time_fn(True) t_run_elapsed = t_run_end - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) retries = 0 if self.compiled else 2 # skip profiling if model is scripted while retries: retries -= 1 try: if has_deepspeed_profiling: macs, _ = profile_deepspeed(self.model, self.input_size) results['gmacs'] = round(macs / 1e9, 2) elif has_fvcore_profiling: macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries) results['gmacs'] = round(macs / 1e9, 2) results['macts'] = round(activations / 1e6, 2) except RuntimeError as e: pass _logger.info( f"Inference benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") return results class TrainBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.train() self.loss = nn.CrossEntropyLoss().to(self.device) self.target_shape = tuple() self.optimizer = create_optimizer_v2( self.model, opt=kwargs.pop('opt', 'sgd'), lr=kwargs.pop('lr', 1e-4)) if kwargs.pop('grad_checkpointing', False): self.model.set_grad_checkpointing() def _gen_target(self, batch_size): return torch.empty( (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) def run(self): def _step(detail=False): self.optimizer.zero_grad() # can this be ignored? 
t_start = self.time_fn() t_fwd_end = t_start t_bwd_end = t_start with self.amp_autocast(): output = self.model(self.example_inputs) if isinstance(output, tuple): output = output[0] if detail: t_fwd_end = self.time_fn(True) target = self._gen_target(output.shape[0]) self.loss(output, target).backward() if detail: t_bwd_end = self.time_fn(True) self.optimizer.step() t_end = self.time_fn(True) if detail: delta_fwd = t_fwd_end - t_start delta_bwd = t_bwd_end - t_fwd_end delta_opt = t_end - t_bwd_end return delta_fwd, delta_bwd, delta_opt else: delta_step = t_end - t_start return delta_step _logger.info( f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') self._init_input() for _ in range(self.num_warm_iter): _step() t_run_start = self.time_fn() if self.detail: total_fwd = 0. total_bwd = 0. total_opt = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_fwd, delta_bwd, delta_opt = _step(True) num_samples += self.batch_size total_fwd += delta_fwd total_bwd += delta_bwd total_opt += delta_opt num_steps = (i + 1) if num_steps % self.log_freq == 0: total_step = total_fwd + total_bwd + total_opt _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," f" {1000 * total_opt / num_steps:0.3f} ms/step opt." ) total_step = total_fwd + total_bwd + total_opt t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), opt_time=round(1000 * total_opt / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) else: total_step = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_step = _step(False) num_samples += self.batch_size total_step += delta_step num_steps = (i + 1) if num_steps % self.log_freq == 0: _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Train benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") return results class ProfileRunner(BenchmarkRunner): def __init__(self, model_name, device='cuda', profiler='', **kwargs): super().__init__(model_name=model_name, device=device, **kwargs) if not profiler: if has_deepspeed_profiling: profiler = 'deepspeed' elif has_fvcore_profiling: profiler = 'fvcore' assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work." 
self.profiler = profiler self.model.eval() def run(self): _logger.info( f'Running profiler on {self.model_name} w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') macs = 0 activations = 0 if self.profiler == 'deepspeed': macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True) elif self.profiler == 'fvcore': macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True) results = dict( gmacs=round(macs / 1e9, 2), macts=round(activations / 1e6, 2), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Profile of {self.model_name} done. " f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.") return results def _try_run( model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False ): batch_size = initial_batch_size results = dict() error_str = 'Unknown' while batch_size: try: torch.cuda.empty_cache() bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) results = bench.run() return results except RuntimeError as e: error_str = str(e) _logger.error(f'"{error_str}" while running benchmark.') if not check_batch_size_retry(error_str): _logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.') break if no_batch_size_retry: break batch_size = decay_batch_step(batch_size) _logger.warning(f'Reducing batch size to {batch_size} for retry.') results['error'] = error_str return results def benchmark(args): if args.amp: _logger.warning("Overriding precision to 'amp' since --amp flag set.") args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype]) _logger.info(f'Benchmarking in {args.precision} precision. ' f'{"NHWC" if args.channels_last else "NCHW"} layout. 
' f'torchscript {"enabled" if args.torchscript else "disabled"}') bench_kwargs = vars(args).copy() bench_kwargs.pop('amp') model = bench_kwargs.pop('model') batch_size = bench_kwargs.pop('batch_size') bench_fns = (InferenceBenchmarkRunner,) prefixes = ('infer',) if args.bench == 'both': bench_fns = ( InferenceBenchmarkRunner, TrainBenchmarkRunner ) prefixes = ('infer', 'train') elif args.bench == 'train': bench_fns = TrainBenchmarkRunner, prefixes = 'train', elif args.bench.startswith('profile'): # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore if 'deepspeed' in args.bench: assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter" bench_kwargs['profiler'] = 'deepspeed' elif 'fvcore' in args.bench: assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter" bench_kwargs['profiler'] = 'fvcore' bench_fns = ProfileRunner, batch_size = 1 model_results = OrderedDict(model=model) for prefix, bench_fn in zip(prefixes, bench_fns): run_results = _try_run( model, bench_fn, bench_kwargs=bench_kwargs, initial_batch_size=batch_size, no_batch_size_retry=args.no_retry, ) if prefix and 'error' not in run_results: run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} model_results.update(run_results) if 'error' in run_results: break if 'error' not in model_results: param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) model_results.setdefault('param_count', param_count) model_results.pop('train_param_count', 0) return model_results def main(): setup_default_logging() args = parser.parse_args() model_cfgs = [] model_names = [] if args.fast_norm: set_fast_norm() if args.model_list: args.model = '' with open(args.model_list) as f: model_names = [line.rstrip() for line in f] model_cfgs = [(n, None) for n in model_names] elif args.model == 'all': # validate all models in a list of names with pretrained checkpoints args.pretrained = True model_names = list_models(pretrained=True, exclude_filters=['*in21k']) model_cfgs = [(n, None) for n in model_names] elif not is_model(args.model): # model name doesn't exist, try as wildcard filter model_names = list_models(args.model) model_cfgs = [(n, None) for n in model_names] if len(model_cfgs): _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) results = [] try: for m, _ in model_cfgs: if not m: continue args.model = m r = benchmark(args) if r: results.append(r) time.sleep(10) except KeyboardInterrupt as e: pass sort_key = 'infer_samples_per_sec' if 'train' in args.bench: sort_key = 'train_samples_per_sec' elif 'profile' in args.bench: sort_key = 'infer_gmacs' results = filter(lambda x: sort_key in x, results) results = sorted(results, key=lambda x: x[sort_key], reverse=True) else: results = benchmark(args) if args.results_file: write_results(args.results_file, results, format=args.results_format) # output results in JSON to stdout w/ delimiter for runner script print(f'--result\n{json.dumps(results, indent=4)}') def write_results(results_file, results, format='csv'): with open(results_file, mode='w') as cf: if format == 'json': json.dump(results, cf, indent=4) else: if not isinstance(results, (list, tuple)): results = [results] if not results: return dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == '__main__': main()
pytorch-image-models/benchmark.py/0
{ "file_path": "pytorch-image-models/benchmark.py", "repo_id": "pytorch-image-models", "token_count": 13272 }
166
# AdvProp (EfficientNet) **AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples. The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{xie2020adversarial, title={Adversarial Examples Improve Image Recognition}, author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le}, year={2020}, eprint={1911.09665}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: AdvProp Paper: Title: Adversarial Examples Improve Image Recognition URL: https://paperswithcode.com/paper/adversarial-examples-improve-image Models: - Name: tf_efficientnet_b0_ap In Collection: AdvProp Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21385973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b0_ap LR: 0.256 Epochs: 350 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.1% Top 5 Accuracy: 93.26% - Name: tf_efficientnet_b1_ap In Collection: AdvProp Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31515350 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b1_ap LR: 0.256 Epochs: 350 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.28% Top 5 Accuracy: 94.3% - Name: tf_efficientnet_b2_ap In Collection: AdvProp Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36800745 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training 
Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b2_ap LR: 0.256 Epochs: 350 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.3% Top 5 Accuracy: 95.03% - Name: tf_efficientnet_b3_ap In Collection: AdvProp Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49384538 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b3_ap LR: 0.256 Epochs: 350 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.82% Top 5 Accuracy: 95.62% - Name: tf_efficientnet_b4_ap In Collection: AdvProp Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77993585 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b4_ap LR: 0.256 Epochs: 350 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.26% Top 5 Accuracy: 96.39% - Name: tf_efficientnet_b5_ap In Collection: AdvProp Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122403150 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b5_ap LR: 0.256 Epochs: 350 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: 
https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.25% Top 5 Accuracy: 96.97% - Name: tf_efficientnet_b6_ap In Collection: AdvProp Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173237466 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b6_ap LR: 0.256 Epochs: 350 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.79% Top 5 Accuracy: 97.14% - Name: tf_efficientnet_b7_ap In Collection: AdvProp Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266850607 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b7_ap LR: 0.256 Epochs: 350 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.12% Top 5 Accuracy: 97.25% - Name: tf_efficientnet_b8_ap In Collection: AdvProp Metadata: FLOPs: 80962956270 Parameters: 87410000 File Size: 351412563 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b8_ap LR: 0.128 Epochs: 350 Crop Pct: '0.954' Momentum: 0.9 Batch Size: 2048 Image Size: '672' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.37% Top 5 Accuracy: 
97.3% -->
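
As a concrete complement to the snippet template included above, here is a minimal sketch of loading one of the AdvProp checkpoints from this collection by its `timm` model id (`tf_efficientnet_b0_ap` here; any other `*_ap` variant from the table can be substituted, assuming a `timm` version that ships these weights):

```py
>>> import timm
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True)
>>> model.eval()
>>> # Matching eval preprocessing (224 px, bicubic, crop pct 0.875 for this variant)
>>> transform = create_transform(**resolve_data_config({}, model=model))
```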
pytorch-image-models/docs/models/.templates/models/advprop.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/advprop.md", "repo_id": "pytorch-image-models", "token_count": 5211 }
167
# (Gluon) ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/XieGDTH16, author = {Saining Xie and Ross B. Girshick and Piotr Doll{\'{a}}r and Zhuowen Tu and Kaiming He}, title = {Aggregated Residual Transformations for Deep Neural Networks}, journal = {CoRR}, volume = {abs/1611.05431}, year = {2016}, url = {http://arxiv.org/abs/1611.05431}, archivePrefix = {arXiv}, eprint = {1611.05431}, timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Gloun ResNeXt Paper: Title: Aggregated Residual Transformations for Deep Neural Networks URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep Models: - Name: gluon_resnext101_32x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177367414 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext101_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L193 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.33% Top 5 Accuracy: 94.91% - Name: gluon_resnext101_64x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 19954172928 Parameters: 83460000 File Size: 334737852 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext101_64x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L201 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.63% Top 5 Accuracy: 95.0% - Name: gluon_resnext50_32x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100441719 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext50_32x4d Crop Pct: 
'0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L185 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.35% Top 5 Accuracy: 94.42% -->
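
As a brief, hedged illustration of the collection above, the ported Gluon weights can be loaded by model id for inference, or re-headed for fine-tuning (the class count below is a placeholder):

```py
>>> import timm
>>> # Inference with the ported ImageNet weights
>>> model = timm.create_model('gluon_resnext50_32x4d', pretrained=True).eval()
>>> # Re-head the same backbone for fine-tuning on a new dataset
>>> NUM_FINETUNE_CLASSES = 10  # placeholder
>>> model = timm.create_model('gluon_resnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```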
pytorch-image-models/docs/models/.templates/models/gloun-resnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/gloun-resnext.md", "repo_id": "pytorch-image-models", "token_count": 1879 }
168
# NASNet **NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{zoph2018learning, title={Learning Transferable Architectures for Scalable Image Recognition}, author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le}, year={2018}, eprint={1707.07012}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: NASNet Paper: Title: Learning Transferable Architectures for Scalable Image Recognition URL: https://paperswithcode.com/paper/learning-transferable-architectures-for Models: - Name: nasnetalarge In Collection: NASNet Metadata: FLOPs: 30242402862 Parameters: 88750000 File Size: 356056626 Architecture: - Average Pooling - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - ReLU Tasks: - Image Classification Training Techniques: - Label Smoothing - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 50x Tesla K40 GPUs ID: nasnetalarge Dropout: 0.5 Crop Pct: '0.911' Momentum: 0.9 Image Size: '331' Interpolation: bicubic Label Smoothing: 0.1 RMSProp $\epsilon$: 1.0 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562 Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.63% Top 5 Accuracy: 96.05% -->
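
To make the entry above concrete (assuming a `timm` build that includes the ported checkpoint), note that `nasnetalarge` runs at a 331×331 input resolution with a 0.911 crop percentage, which `resolve_data_config` reads straight off the model:

```py
>>> import timm
>>> from timm.data import resolve_data_config

>>> model = timm.create_model('nasnetalarge', pretrained=True).eval()
>>> print(resolve_data_config({}, model=model)['input_size'])  # expected: (3, 331, 331)
```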
pytorch-image-models/docs/models/.templates/models/nasnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/nasnet.md", "repo_id": "pytorch-image-models", "token_count": 730 }
169
# SK-ResNeXt **SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{li2019selective, title={Selective Kernel Networks}, author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, year={2019}, eprint={1903.06586}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: SKResNeXt Paper: Title: Selective Kernel Networks URL: https://paperswithcode.com/paper/selective-kernel-networks Models: - Name: skresnext50_32x4d In Collection: SKResNeXt Metadata: FLOPs: 5739845824 Parameters: 27480000 File Size: 110340975 Architecture: - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - Max Pooling - Residual Connection - Selective Kernel - Softmax Tasks: - Image Classification Training Data: - ImageNet Training Resources: 8x GPUs ID: skresnext50_32x4d LR: 0.1 Epochs: 100 Layers: 50 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.15% Top 5 Accuracy: 94.64% -->
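## What does a Selective Kernel look like in code?

As a rough illustration of the selective kernel idea described above, the sketch below fuses two branches with different receptive fields using a learned softmax over branches. It is a simplified stand-in, not the actual `SelectiveKernel` layer in timm, and the channel and reduction values are arbitrary example choices.

```py
import torch
import torch.nn as nn

class SelectiveKernelSketch(nn.Module):
    """Two branches (3x3 and dilated 3x3 ~ 5x5 receptive field), fused by soft attention."""
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.branch3 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.branch5 = nn.Conv2d(channels, channels, 3, padding=2, dilation=2, bias=False)
        self.attn = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, 2 * channels, 1),  # one logit per branch and channel
        )

    def forward(self, x):
        b3, b5 = self.branch3(x), self.branch5(x)
        pooled = (b3 + b5).mean(dim=(2, 3), keepdim=True)   # fuse branches + global average pool
        weights = self.attn(pooled).reshape(x.shape[0], 2, x.shape[1], 1, 1).softmax(dim=1)
        return weights[:, 0] * b3 + weights[:, 1] * b5      # adaptive receptive field selection

sk = SelectiveKernelSketch(64)
print(sk(torch.randn(2, 64, 32, 32)).shape)  # torch.Size([2, 64, 32, 32])
```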
pytorch-image-models/docs/models/.templates/models/skresnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/skresnext.md", "repo_id": "pytorch-image-models", "token_count": 822 }
170
# CSP-ResNeXt **CSPResNeXt** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNeXt](https://paperswithcode.com/method/resnext). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('cspresnext50', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `cspresnext50`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('cspresnext50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @misc{wang2019cspnet, title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, year={2019}, eprint={1911.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP ResNeXt Paper: Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance Models: - Name: cspresnext50 In Collection: CSP ResNeXt Metadata: FLOPs: 3962945536 Parameters: 20570000 File Size: 82562887 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Label Smoothing - Polynomial Learning Rate Decay - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 1x GPU ID: cspresnext50 LR: 0.1 Layers: 50 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 128 Image Size: '224' Weight Decay: 0.005 Interpolation: bilinear Training Steps: 8000000 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L430 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.05% Top 5 Accuracy: 94.94% -->
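## What does the cross stage partial split look like in code?

The split-and-merge strategy described at the top of this page can be sketched in a few lines of PyTorch. This is an illustrative toy stage, not the timm `CspNet` implementation; the block stack and channel counts are placeholders.

```py
import torch
import torch.nn as nn

class CrossStagePartialSketch(nn.Module):
    """Half the channels bypass the block stack and are merged back at the end."""
    def __init__(self, channels, blocks):
        super().__init__()
        self.blocks = blocks                                  # any stack of residual blocks
        self.transition = nn.Conv2d(channels, channels, 1, bias=False)

    def forward(self, x):
        part1, part2 = x.chunk(2, dim=1)                      # partition the base feature map
        part2 = self.blocks(part2)                            # only this part goes through the stage
        return self.transition(torch.cat([part1, part2], dim=1))  # cross-stage merge

blocks = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1, bias=False), nn.ReLU(inplace=True))
stage = CrossStagePartialSketch(64, blocks)
print(stage(torch.randn(1, 64, 28, 28)).shape)  # torch.Size([1, 64, 28, 28])
```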
pytorch-image-models/hfdocs/source/models/csp-resnext.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/csp-resnext.mdx", "repo_id": "pytorch-image-models", "token_count": 1725 }
171
# HRNet **HRNet**, or **High-Resolution Net**, is a general purpose convolutional neural network for tasks like semantic segmentation, object detection and image classification. It is able to maintain high resolution representations through the whole process. We start from a high-resolution convolution stream, gradually add high-to-low resolution convolution streams one by one, and connect the multi-resolution streams in parallel. The resulting network consists of several (\\( 4 \\) in the paper) stages and the \\( n \\)th stage contains \\( n \\) streams corresponding to \\( n \\) resolutions. The authors conduct repeated multi-resolution fusions by exchanging the information across the parallel streams over and over. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('hrnet_w18', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `hrnet_w18`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('hrnet_w18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @misc{sun2019highresolution, title={High-Resolution Representations for Labeling Pixels and Regions}, author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, year={2019}, eprint={1904.04514}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: HRNet Paper: Title: Deep High-Resolution Representation Learning for Visual Recognition URL: https://paperswithcode.com/paper/190807919 Models: - Name: hrnet_w18 In Collection: HRNet Metadata: FLOPs: 5547205500 Parameters: 21300000 File Size: 85718883 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L800 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.76% Top 5 Accuracy: 93.44% - Name: hrnet_w18_small In Collection: HRNet Metadata: FLOPs: 2071651488 Parameters: 13190000 File Size: 52934302 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18_small Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L790 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.34% Top 5 Accuracy: 90.68% - Name: hrnet_w18_small_v2 In Collection: HRNet Metadata: FLOPs: 3360023160 Parameters: 15600000 File Size: 62682879 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18_small_v2 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L795 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.11% Top 5 Accuracy: 92.41% - Name: hrnet_w30 In Collection: HRNet Metadata: FLOPs: 10474119492 Parameters: 37710000 File Size: 151452218 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w30 Epochs: 100 Layers: 30 Crop Pct: '0.875' 
Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L805 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.21% Top 5 Accuracy: 94.22% - Name: hrnet_w32 In Collection: HRNet Metadata: FLOPs: 11524528320 Parameters: 41230000 File Size: 165547812 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs Training Time: 60 hours ID: hrnet_w32 Epochs: 100 Layers: 32 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L810 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.45% Top 5 Accuracy: 94.19% - Name: hrnet_w40 In Collection: HRNet Metadata: FLOPs: 16381182192 Parameters: 57560000 File Size: 230899236 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w40 Epochs: 100 Layers: 40 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L815 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.93% Top 5 Accuracy: 94.48% - Name: hrnet_w44 In Collection: HRNet Metadata: FLOPs: 19202520264 Parameters: 67060000 File Size: 268957432 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w44 Epochs: 100 Layers: 44 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L820 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.89% Top 5 Accuracy: 94.37% - Name: hrnet_w48 In Collection: HRNet Metadata: FLOPs: 22285865760 Parameters: 77470000 File Size: 310603710 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs Training Time: 80 hours ID: hrnet_w48 Epochs: 100 Layers: 48 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: 
https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L825 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.32% Top 5 Accuracy: 94.51% - Name: hrnet_w64 In Collection: HRNet Metadata: FLOPs: 37239321984 Parameters: 128060000 File Size: 513071818 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w64 Epochs: 100 Layers: 64 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L830 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.46% Top 5 Accuracy: 94.65% -->
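## What does the multi-resolution fusion look like in code?

The repeated multi-resolution fusion described above can be illustrated with a minimal two-stream exchange unit. This is a simplified sketch, not the timm HRNet code; the channel counts and nearest-neighbour upsampling are arbitrary choices for the example.

```py
import torch
import torch.nn as nn
import torch.nn.functional as F

class TwoStreamFusionSketch(nn.Module):
    """One HRNet-style exchange between a high-resolution and a low-resolution stream."""
    def __init__(self, hi_channels, lo_channels):
        super().__init__()
        self.hi_to_lo = nn.Conv2d(hi_channels, lo_channels, 3, stride=2, padding=1)  # downsample high-res
        self.lo_to_hi = nn.Conv2d(lo_channels, hi_channels, 1)                        # 1x1 then upsample

    def forward(self, hi, lo):
        new_hi = hi + F.interpolate(self.lo_to_hi(lo), size=hi.shape[-2:], mode='nearest')
        new_lo = lo + self.hi_to_lo(hi)
        return new_hi, new_lo

fuse = TwoStreamFusionSketch(32, 64)
hi, lo = torch.randn(1, 32, 56, 56), torch.randn(1, 64, 28, 28)
print([t.shape for t in fuse(hi, lo)])  # [torch.Size([1, 32, 56, 56]), torch.Size([1, 64, 28, 28])]
```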
pytorch-image-models/hfdocs/source/models/hrnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/hrnet.mdx", "repo_id": "pytorch-image-models", "token_count": 5056 }
172
# RegNetY **RegNetY** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\), and generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): \\( u\_{j} = w\_{0} + w\_{a}\cdot{j} \\). For **RegNetX**, the authors add further restrictions: \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier). For **RegNetY**, the authors make one change: they include [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('regnety_002', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `regnety_002`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('regnety_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
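## How are the block widths generated?

To make the linear width parameterisation above concrete, the short sketch below generates per-block widths from example values of \\( w\_{0} \\), \\( w\_{a} \\) and \\( d \\). The numbers are illustrative only, not an actual RegNetY variant, and the sketch ignores the quantisation step the full RegNet design space applies before widths are grouped into stages.

```py
w_0, w_a, d = 24, 36.0, 13              # illustrative generator values
u = [w_0 + w_a * j for j in range(d)]   # u_j = w_0 + w_a * j
print([round(x) for x in u[:5]])        # [24, 60, 96, 132, 168]
```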
## Citation ```BibTeX @misc{radosavovic2020designing, title={Designing Network Design Spaces}, author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, year={2020}, eprint={2003.13678}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RegNetY Paper: Title: Designing Network Design Spaces URL: https://paperswithcode.com/paper/designing-network-design-spaces Models: - Name: regnety_002 In Collection: RegNetY Metadata: FLOPs: 255754236 Parameters: 3160000 File Size: 12782926 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_002 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L409 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 70.28% Top 5 Accuracy: 89.55% - Name: regnety_004 In Collection: RegNetY Metadata: FLOPs: 515664568 Parameters: 4340000 File Size: 17542753 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_004 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L415 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.02% Top 5 Accuracy: 91.76% - Name: regnety_006 In Collection: RegNetY Metadata: FLOPs: 771746928 Parameters: 6060000 File Size: 24394127 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_006 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L421 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.27% Top 5 Accuracy: 92.53% - Name: regnety_008 In Collection: RegNetY Metadata: FLOPs: 1023448952 Parameters: 6260000 File Size: 25223268 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image 
Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_008 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L427 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.32% Top 5 Accuracy: 93.07% - Name: regnety_016 In Collection: RegNetY Metadata: FLOPs: 2070895094 Parameters: 11200000 File Size: 45115589 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_016 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L433 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.87% Top 5 Accuracy: 93.73% - Name: regnety_032 In Collection: RegNetY Metadata: FLOPs: 4081118714 Parameters: 19440000 File Size: 78084523 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_032 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L439 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.01% Top 5 Accuracy: 95.91% - Name: regnety_040 In Collection: RegNetY Metadata: FLOPs: 5105933432 Parameters: 20650000 File Size: 82913909 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_040 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L445 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.23% Top 5 Accuracy: 94.64% - Name: regnety_064 In Collection: RegNetY Metadata: FLOPs: 8167730444 Parameters: 30580000 File Size: 122751416 Architecture: - 
1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_064 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L451 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.73% Top 5 Accuracy: 94.76% - Name: regnety_080 In Collection: RegNetY Metadata: FLOPs: 10233621420 Parameters: 39180000 File Size: 157124671 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_080 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L457 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.87% Top 5 Accuracy: 94.83% - Name: regnety_120 In Collection: RegNetY Metadata: FLOPs: 15542094856 Parameters: 51820000 File Size: 207743949 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_120 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L463 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.38% Top 5 Accuracy: 95.12% - Name: regnety_160 In Collection: RegNetY Metadata: FLOPs: 20450196852 Parameters: 83590000 File Size: 334916722 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_160 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L469 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_160-d64013cd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: 
Top 1 Accuracy: 80.28% Top 5 Accuracy: 94.97% - Name: regnety_320 In Collection: RegNetY Metadata: FLOPs: 41492618394 Parameters: 145050000 File Size: 580891965 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_320 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L475 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.8% Top 5 Accuracy: 95.25% -->
pytorch-image-models/hfdocs/source/models/regnety.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/regnety.mdx", "repo_id": "pytorch-image-models", "token_count": 6770 }
173
# SWSL ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width. The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('swsl_resnext101_32x16d', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `swsl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('swsl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I. 
Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SWSL ResNext Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: swsl_resnext101_32x16d In Collection: SWSL ResNext Metadata: FLOPs: 46623691776 Parameters: 194030000 File Size: 777518664 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x16d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L1009 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.34% Top 5 Accuracy: 96.84% - Name: swsl_resnext101_32x4d In Collection: SWSL ResNext Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177341913 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x4d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L987 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.22% Top 5 Accuracy: 96.77% - Name: swsl_resnext101_32x8d In Collection: SWSL ResNext Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356056638 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x8d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L998 Weights: 
https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.27% Top 5 Accuracy: 97.17% - Name: swsl_resnext50_32x4d In Collection: SWSL ResNext Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100428550 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext50_32x4d LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L976 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.17% Top 5 Accuracy: 96.23% -->
pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx", "repo_id": "pytorch-image-models", "token_count": 3472 }
174
# Scripts Train, validation, inference, and checkpoint cleaning scripts are included in the github root folder. Scripts are not currently packaged in the pip release. The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on [NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples). ## Training Script The variety of training args is large and not all combinations of options (or even individual options) have been fully tested. For the training dataset argument, specify the base folder that contains both a `train` and a `validation` folder. To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value: ```bash ./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4 ``` <Tip> It is recommended to use PyTorch 1.9+ w/ PyTorch native AMP and DDP instead of APEX AMP. --amp defaults to native AMP as of timm ver 0.4.3. --apex-amp will force use of APEX components if they are installed. </Tip> ## Validation / Inference Scripts Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs topk class ids in a csv. Specify the folder containing validation images, not the base folder as in the training script. To validate with the model's pretrained weights (if they exist): ```bash python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained ``` To run inference from a checkpoint: ```bash python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar ``` ## Training Examples ### EfficientNet-B2 with RandAugment - 80.4 top-1, 95.1 top-5 These params are for dual Titan RTX cards with NVIDIA Apex installed: ```bash ./distributed_train.sh 2 /imagenet/ --model efficientnet_b2 -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .016 ``` ### MixNet-XL with RandAugment - 80.5 top-1, 94.9 top-5 These params are for dual Titan RTX cards with NVIDIA Apex installed: ```bash ./distributed_train.sh 2 /imagenet/ --model mixnet_xl -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .969 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.3 --amp --lr .016 --dist-bn reduce ``` ### SE-ResNeXt-26-D and SE-ResNeXt-26-T These hparams (or similar) work well for a wide range of ResNet architectures; it is generally a good idea to increase the epoch count as the model size increases... i.e. approx 180-200 for ResNe(X)t50, and 220+ for larger. Increase batch size and LR proportionally for better GPUs or with AMP enabled. 
These params were for 2 1080Ti cards: ```bash ./distributed_train.sh 2 /imagenet/ --model seresnext26t_32x4d --lr 0.1 --warmup-epochs 5 --epochs 160 --weight-decay 1e-4 --sched cosine --reprob 0.4 --remode pixel -b 112 ``` ### EfficientNet-B3 with RandAugment - 81.5 top-1, 95.7 top-5 The training of this model started with the same command line as EfficientNet-B2 w/ RA above. After almost three weeks of training the process crashed. The results weren't looking amazing so I resumed the training several times with tweaks to a few params (increase RE prob, decrease rand-aug, increase ema-decay). Nothing looked great. I ended up averaging the best checkpoints from all restarts. The result is mediocre at default res/crop but oddly performs much better with a full image test crop of 1.0. ### EfficientNet-B0 with RandAugment - 77.7 top-1, 95.3 top-5 [Michael Klachko](https://github.com/michaelklachko) achieved these results with the command line for B2 adapted for larger batch size, with the recommended B0 dropout rate of 0.2. ```bash ./distributed_train.sh 2 /imagenet/ --model efficientnet_b0 -b 384 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .048 ``` ### ResNet50 with JSD loss and RandAugment (clean + 2x RA augs) - 79.04 top-1, 94.39 top-5 Trained on two older 1080Ti cards, this took a while. The ImageNet validation result is only slightly (and not statistically significantly) better than my first good AugMix training run at 78.99. However, these weights are more robust on tests with ImageNetV2, ImageNet-Sketch, etc. Unlike my first AugMix runs, I've enabled SplitBatchNorm, disabled random erasing on the clean split, and cranked up random erasing prob on the 2 augmented paths. ```bash ./distributed_train.sh 2 /imagenet -b 64 --model resnet50 --sched cosine --epochs 200 --lr 0.05 --amp --remode pixel --reprob 0.6 --aug-splits 3 --aa rand-m9-mstd0.5-inc1 --resplit --split-bn --jsd --dist-bn reduce ``` ### EfficientNet-ES (EdgeTPU-Small) with RandAugment - 78.066 top-1, 93.926 top-5 Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards. Model EMA was not used; the final checkpoint is the average of the 8 best checkpoints during training. ```bash ./distributed_train.sh 8 /imagenet --model efficientnet_es -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 ``` ### MobileNetV3-Large-100 - 75.766 top-1, 92.542 top-5 ```bash ./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9 ``` ### ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5 These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50 and likely 101. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPUs using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The command line below is tuned for 8 GPU training. 
```bash ./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce ```
pytorch-image-models/hfdocs/source/training_script.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/training_script.mdx", "repo_id": "pytorch-image-models", "token_count": 2320 }
175
DEFAULT_CROP_PCT = 0.875 DEFAULT_CROP_MODE = 'center' IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073) OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)
pytorch-image-models/timm/data/constants.py/0
{ "file_path": "pytorch-image-models/timm/data/constants.py", "repo_id": "pytorch-image-models", "token_count": 236 }
176
""" A dataset reader that extracts images from folders Folders are scanned recursively to find image files. Labels are based on the folder hierarchy, just leaf folders by default. Hacked together by / Copyright 2020 Ross Wightman """ import os from typing import Dict, List, Optional, Set, Tuple, Union from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader def find_images_and_targets( folder: str, types: Optional[Union[List, Tuple, Set]] = None, class_to_idx: Optional[Dict] = None, leaf_name_only: bool = True, sort: bool = True ): """ Walk folder recursively to discover images and map them to classes by folder names. Args: folder: root of folder to recrusively search types: types (file extensions) to search for in path class_to_idx: specify mapping for class (folder name) to class index if set leaf_name_only: use only leaf-name of folder walk for class names sort: re-sort found images by name (for consistent ordering) Returns: A list of image and target tuples, class_to_idx mapping """ types = get_img_extensions(as_set=True) if not types else set(types) labels = [] filenames = [] for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True): rel_path = os.path.relpath(root, folder) if (root != folder) else '' label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') for f in files: base, ext = os.path.splitext(f) if ext.lower() in types: filenames.append(os.path.join(root, f)) labels.append(label) if class_to_idx is None: # building class index unique_labels = set(labels) sorted_labels = list(sorted(unique_labels, key=natural_key)) class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx] if sort: images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) return images_and_targets, class_to_idx class ReaderImageFolder(Reader): def __init__( self, root, class_map='', input_key=None, ): super().__init__() self.root = root class_to_idx = None if class_map: class_to_idx = load_class_map(class_map, root) find_types = None if input_key: find_types = input_key.split(';') self.samples, self.class_to_idx = find_images_and_targets( root, class_to_idx=class_to_idx, types=find_types, ) if len(self.samples) == 0: raise RuntimeError( f'Found 0 images in subfolders of {root}. ' f'Supported image extensions are {", ".join(get_img_extensions())}') def __getitem__(self, index): path, target = self.samples[index] return open(path, 'rb'), target def __len__(self): return len(self.samples) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0] if basename: filename = os.path.basename(filename) elif not absolute: filename = os.path.relpath(filename, self.root) return filename
pytorch-image-models/timm/data/readers/reader_image_folder.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_folder.py", "repo_id": "pytorch-image-models", "token_count": 1510 }
177
""" Attention Pool 2D Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. Based on idea in CLIP by OpenAI, licensed Apache 2.0 https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py Hacked together by / Copyright 2021 Ross Wightman """ from typing import Union, Tuple import torch import torch.nn as nn from .helpers import to_2tuple from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding from .weight_init import trunc_normal_ class RotAttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW """ def __init__( self, in_features: int, out_features: int = None, embed_dim: int = None, num_heads: int = 4, qkv_bias: bool = True, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.proj = nn.Linear(embed_dim, out_features) self.num_heads = num_heads assert embed_dim % num_heads == 0 self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 self.pos_embed = RotaryEmbedding(self.head_dim) trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def forward(self, x): B, _, H, W = x.shape N = H * W x = x.reshape(B, -1, N).permute(0, 2, 1) x = torch.cat([x.mean(1, keepdim=True), x], dim=1) x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x[0], x[1], x[2] qc, q = q[:, :, :1], q[:, :, 1:] sin_emb, cos_emb = self.pos_embed.get_embed((H, W)) q = apply_rot_embed(q, sin_emb, cos_emb) q = torch.cat([qc, q], dim=2) kc, k = k[:, :, :1], k[:, :, 1:] k = apply_rot_embed(k, sin_emb, cos_emb) k = torch.cat([kc, k], dim=2) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) x = self.proj(x) return x[:, 0] class AttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. It was based on impl in CLIP by OpenAI https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. 
""" def __init__( self, in_features: int, feat_size: Union[int, Tuple[int, int]], out_features: int = None, embed_dim: int = None, num_heads: int = 4, qkv_bias: bool = True, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features assert embed_dim % num_heads == 0 self.feat_size = to_2tuple(feat_size) self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.proj = nn.Linear(embed_dim, out_features) self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 spatial_dim = self.feat_size[0] * self.feat_size[1] self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) trunc_normal_(self.pos_embed, std=in_features ** -0.5) trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def forward(self, x): B, _, H, W = x.shape N = H * W assert self.feat_size[0] == H assert self.feat_size[1] == W x = x.reshape(B, -1, N).permute(0, 2, 1) x = torch.cat([x.mean(1, keepdim=True), x], dim=1) x = x + self.pos_embed.unsqueeze(0).to(x.dtype) x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x[0], x[1], x[2] attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) x = self.proj(x) return x[:, 0]
pytorch-image-models/timm/layers/attention_pool2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/attention_pool2d.py", "repo_id": "pytorch-image-models", "token_count": 2301 }
178
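Both pooling heads above replace global average pooling on a CNN feature map with a single multi-head attention step over the spatial tokens plus a mean-pooled query token. A minimal usage sketch, not part of the original file, importing from the module path shown above (the shapes and head counts are illustrative):

```py
import torch
from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d

feat = torch.randn(2, 2048, 7, 7)  # [batch, channels, H, W] backbone features

# Learned absolute position embedding: the spatial feature size is fixed at construction.
abs_pool = AttentionPool2d(in_features=2048, feat_size=7, out_features=512, num_heads=8)
print(abs_pool(feat).shape)   # torch.Size([2, 512])

# Rotary (relative) position embedding: no fixed feat_size needed, but per the note in the
# class docstring, accuracy falls off when run far from the training resolution.
rot_pool = RotAttentionPool2d(in_features=2048, out_features=512, num_heads=8)
print(rot_pool(feat).shape)   # torch.Size([2, 512])
```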
""" EvoNorm in PyTorch Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 @inproceedings{NEURIPS2020, author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, booktitle = {Advances in Neural Information Processing Systems}, editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, pages = {13539--13550}, publisher = {Curran Associates, Inc.}, title = {Evolving Normalization-Activation Layers}, url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, volume = {33}, year = {2020} } An attempt at getting decent performing EvoNorms running in PyTorch. While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm in terms of memory usage and throughput on GPUs. I'm testing these modules on TPU w/ PyTorch XLA. Promising start but currently working around some issues with builtin torch/tensor.var/std. Unlike GPU, similar train speeds for EvoNormS variants and BatchNorm. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Sequence, Union import torch import torch.nn as nn import torch.nn.functional as F from .create_act import create_act_layer from .trace_utils import _assert def instance_std(x, eps: float = 1e-5): std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) return std.expand(x.shape) def instance_std_tpu(x, eps: float = 1e-5): std = manual_var(x, dim=(2, 3)).add(eps).sqrt() return std.expand(x.shape) # instance_std = instance_std_tpu def instance_rms(x, eps: float = 1e-5): rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) return rms.expand(x.shape) def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): xm = x.mean(dim=dim, keepdim=True) if diff_sqm: # difference of squared mean and mean squared, faster on TPU can be less stable var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) else: var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) return var def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): B, C, H, W = x.shape x_dtype = x.dtype _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) else: x = x.reshape(B, groups, C // groups, H, W) std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) return std.expand(x.shape).reshape(B, C, H, W) def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): # This is a workaround for some stability / odd behaviour of .var and .std # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results B, C, H, W = x.shape _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues var = manual_var(x, dim=-1, diff_sqm=diff_sqm) else: x = x.reshape(B, groups, C // groups, H, W) var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) #group_std = group_std_tpu # FIXME TPU temporary def group_rms(x, groups: int = 32, eps: float = 1e-5): B, C, H, W = x.shape _assert(C % groups == 0, '') x_dtype = x.dtype x = x.reshape(B, groups, C // groups, H, W) rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) return rms.expand(x.shape).reshape(B, C, H, W) class EvoNorm2dB0(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) # var = manual_var(x, dim=(0, 2, 3)).squeeze() n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach() * self.momentum * (n / (n - 1))) else: var = self.running_var left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) v = self.v.to(x_dtype).view(v_shape) right = x * v + instance_std(x, self.eps) x = x / left.max(right) return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) class EvoNorm2dB1(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = (x + 1) * instance_rms(x, self.eps) x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dB2(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', 
torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = instance_rms(x, self.eps) - x x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0a(EvoNorm2dS0): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) d = group_std(x, self.groups, self.eps) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() x = x / d return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.pre_act_norm = False self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1a(EvoNorm2dS1): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def 
forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2a(EvoNorm2dS2): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
pytorch-image-models/timm/layers/evo_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/evo_norm.py", "repo_id": "pytorch-image-models", "token_count": 6684 }
179
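Each EvoNorm variant above fuses normalization and non-linearity into one module, so it slots in where a `BatchNorm2d` + activation pair would normally sit. A small illustrative sketch (my own, not from the file), importing from the module path shown above; the channel and group counts are chosen so the `C % groups == 0` assertion holds:

```py
import torch
import torch.nn as nn
from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dS0

block = nn.Sequential(
    nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False),
    EvoNorm2dS0(64, groups=32),   # sample-based (S0) variant: grouped std, no running stats
    nn.Conv2d(64, 64, 3, padding=1, bias=False),
    EvoNorm2dB0(64),              # batch-based (B0) variant: tracks running_var like BN
)

x = torch.randn(4, 3, 32, 32)
print(block(x).shape)  # torch.Size([4, 64, 16, 16])
```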
from typing import Optional import torch from torch import nn from torch import nn, Tensor from torch.nn.modules.transformer import _get_activation_fn def add_ml_decoder_head(model): if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50 model.global_pool = nn.Identity() del model.fc num_classes = model.num_classes num_features = model.num_features model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet model.global_pool = nn.Identity() del model.classifier num_classes = model.num_classes num_features = model.num_features model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head') del model.head num_classes = model.num_classes num_features = model.num_features model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features) else: print("Model code-writing is not aligned currently with ml-decoder") exit(-1) if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout model.drop_rate = 0 return model class TransformerDecoderLayerOptimal(nn.Module): def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu", layer_norm_eps=1e-5) -> None: super(TransformerDecoderLayerOptimal, self).__init__() self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = torch.nn.functional.relu super(TransformerDecoderLayerOptimal, self).__setstate__(state) def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: tgt = tgt + self.dropout1(tgt) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory)[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt # @torch.jit.script # class ExtrapClasses(object): # def __init__(self, num_queries: int, group_size: int): # self.num_queries = num_queries # self.group_size = group_size # # def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap: # torch.Tensor): # # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size) # h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups]) # w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size)) # out = (h * w).sum(dim=2) + class_embed_b # out = out.view((h.shape[0], self.group_size * self.num_queries)) # return out @torch.jit.script class GroupFC(object): def __init__(self, embed_len_decoder: int): self.embed_len_decoder = embed_len_decoder def __call__(self, h: torch.Tensor, duplicate_pooling: torch.Tensor, out_extrap: 
torch.Tensor): for i in range(self.embed_len_decoder): h_i = h[:, i, :] w_i = duplicate_pooling[i, :, :] out_extrap[:, i, :] = torch.matmul(h_i, w_i) class MLDecoder(nn.Module): def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): super(MLDecoder, self).__init__() embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups if embed_len_decoder > num_classes: embed_len_decoder = num_classes # switching to 768 initial embeddings decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) # decoder decoder_dropout = 0.1 num_layers_decoder = 1 dim_feedforward = 2048 layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, dim_feedforward=dim_feedforward, dropout=decoder_dropout) self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) # non-learnable queries self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) self.query_embed.requires_grad_(False) # group fully-connected self.num_classes = num_classes self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) self.duplicate_pooling = torch.nn.Parameter( torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) torch.nn.init.xavier_normal_(self.duplicate_pooling) torch.nn.init.constant_(self.duplicate_pooling_bias, 0) self.group_fc = GroupFC(embed_len_decoder) def forward(self, x): if len(x.shape) == 4: # [bs,2048, 7,7] embedding_spatial = x.flatten(2).transpose(1, 2) else: # [bs, 197,468] embedding_spatial = x embedding_spatial_786 = self.embed_standart(embedding_spatial) embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) bs = embedding_spatial_786.shape[0] query_embed = self.query_embed.weight # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] h = h.transpose(0, 1) out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) self.group_fc(h, self.duplicate_pooling, out_extrap) h_out = out_extrap.flatten(1)[:, :self.num_classes] h_out += self.duplicate_pooling_bias logits = h_out return logits
pytorch-image-models/timm/layers/ml_decoder.py/0
{ "file_path": "pytorch-image-models/timm/layers/ml_decoder.py", "repo_id": "pytorch-image-models", "token_count": 3177 }
180
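`add_ml_decoder_head` rewires a classification model so its pooled linear classifier is replaced by the query-based `MLDecoder`, which consumes the unpooled feature map; it is mainly aimed at multi-label classification. A hypothetical sketch (the 80-class count is made up; ResNet-50 is chosen because it matches the `global_pool`/`fc` branch handled above, and the forward path relies on timm's ResNet applying `global_pool` and then `fc` in its head):

```py
import torch
import timm
from timm.layers.ml_decoder import add_ml_decoder_head

model = timm.create_model('resnet50', pretrained=False, num_classes=80)
model = add_ml_decoder_head(model)   # global_pool -> Identity, fc -> MLDecoder

x = torch.randn(2, 3, 224, 224)
logits = model(x)                    # MLDecoder sees the [B, 2048, 7, 7] feature map
print(logits.shape)                  # torch.Size([2, 80])
```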
""" Split BatchNorm A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through a separate BN layer. The first split is passed through the parent BN layers with weight/bias keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' namespace. This allows easily removing the auxiliary BN layers after training to efficiently achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, 'Disentangled Learning via An Auxiliary BN' Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn class SplitBatchNorm2d(torch.nn.BatchNorm2d): def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, num_splits=2): super().__init__(num_features, eps, momentum, affine, track_running_stats) assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' self.num_splits = num_splits self.aux_bn = nn.ModuleList([ nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) def forward(self, input: torch.Tensor): if self.training: # aux BN only relevant while training split_size = input.shape[0] // self.num_splits assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" split_input = input.split(split_size) x = [super().forward(split_input[0])] for i, a in enumerate(self.aux_bn): x.append(a(split_input[i + 1])) return torch.cat(x, dim=0) else: return super().forward(input) def convert_splitbn_model(module, num_splits=2): """ Recursively traverse module and its children to replace all instances of ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. Args: module (torch.nn.Module): input module num_splits: number of separate batchnorm layers to split input across Example:: >>> # model is an instance of torch.nn.Module >>> model = timm.models.convert_splitbn_model(model, num_splits=2) """ mod = module if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): return module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SplitBatchNorm2d( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_splits=num_splits) mod.running_mean = module.running_mean mod.running_var = module.running_var mod.num_batches_tracked = module.num_batches_tracked if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for aux in mod.aux_bn: aux.running_mean = module.running_mean.clone() aux.running_var = module.running_var.clone() aux.num_batches_tracked = module.num_batches_tracked.clone() if module.affine: aux.weight.data = module.weight.data.clone().detach() aux.bias.data = module.bias.data.clone().detach() for name, child in module.named_children(): mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) del module return mod
pytorch-image-models/timm/layers/split_batchnorm.py/0
{ "file_path": "pytorch-image-models/timm/layers/split_batchnorm.py", "repo_id": "pytorch-image-models", "token_count": 1394 }
181
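`convert_splitbn_model` implements the 'Auxiliary BatchNorm' recipe from AdvProp: during training, each split of the batch (e.g. clean and adversarial samples concatenated along the batch dimension) is normalized with its own statistics, while eval uses only the primary BN. A brief sketch (the model choice and shapes are illustrative):

```py
import torch
import timm
from timm.layers.split_batchnorm import convert_splitbn_model

model = timm.create_model('resnet18', pretrained=False)
model = convert_splitbn_model(model, num_splits=2)  # every BatchNorm2d -> SplitBatchNorm2d

clean = torch.randn(8, 3, 224, 224)
adv = torch.randn(8, 3, 224, 224)
model.train()
out = model(torch.cat([clean, adv], dim=0))  # first half uses main BN, second half aux BN
print(out.shape)                             # torch.Size([16, 1000])
```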
import os from typing import Any, Dict, Optional, Union from urllib.parse import urlsplit from timm.layers import set_layer_config from ._helpers import load_checkpoint from ._hub import load_model_config_from_hf from ._pretrained import PretrainedCfg from ._registry import is_model, model_entrypoint, split_model_name_tag __all__ = ['parse_model_name', 'safe_model_name', 'create_model'] def parse_model_name(model_name: str): if model_name.startswith('hf_hub'): # NOTE for backwards compat, deprecate hf_hub use model_name = model_name.replace('hf_hub', 'hf-hub') parsed = urlsplit(model_name) assert parsed.scheme in ('', 'timm', 'hf-hub') if parsed.scheme == 'hf-hub': # FIXME may use fragment as revision, currently `@` in URI path return parsed.scheme, parsed.path else: model_name = os.path.split(parsed.path)[-1] return 'timm', model_name def safe_model_name(model_name: str, remove_source: bool = True): # return a filename / path safe model name def make_safe(name): return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') if remove_source: model_name = parse_model_name(model_name)[-1] return make_safe(model_name) def create_model( model_name: str, pretrained: bool = False, pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None, pretrained_cfg_overlay: Optional[Dict[str, Any]] = None, checkpoint_path: str = '', scriptable: Optional[bool] = None, exportable: Optional[bool] = None, no_jit: Optional[bool] = None, **kwargs, ): """Create a model. Lookup model's entrypoint function and pass relevant args to create a new model. <Tip> **kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()`` and then the model class __init__(). kwargs values set to None are pruned before passing. </Tip> Args: model_name: Name of model to instantiate. pretrained: If set to `True`, load pretrained ImageNet-1k weights. pretrained_cfg: Pass in an external pretrained_cfg for model. pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these. checkpoint_path: Path of checkpoint to load _after_ the model is initialized. scriptable: Set layer config so that model is jit scriptable (not working for all models yet). exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet). no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only). Keyword Args: drop_rate (float): Classifier dropout rate for training. drop_path_rate (float): Stochastic depth drop rate for training. global_pool (str): Classifier global pooling type. Example: ```py >>> from timm import create_model >>> # Create a MobileNetV3-Large model with no pretrained weights. >>> model = create_model('mobilenetv3_large_100') >>> # Create a MobileNetV3-Large model with pretrained weights. >>> model = create_model('mobilenetv3_large_100', pretrained=True) >>> model.num_classes 1000 >>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes. >>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10) >>> model.num_classes 10 ``` """ # Parameters that aren't supported by all models or are intended to only override model defaults if set # should default to None in command line args/cfg. Remove them if they are present and not set so that # non-supporting models don't break and default args remain in effect. 
kwargs = {k: v for k, v in kwargs.items() if v is not None} model_source, model_name = parse_model_name(model_name) if model_source == 'hf-hub': assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.' # For model names specified in the form `hf-hub:path/architecture_name@revision`, # load model weights + pretrained_cfg from Hugging Face hub. pretrained_cfg, model_name, model_args = load_model_config_from_hf(model_name) if model_args: for k, v in model_args.items(): kwargs.setdefault(k, v) else: model_name, pretrained_tag = split_model_name_tag(model_name) if pretrained_tag and not pretrained_cfg: # a valid pretrained_cfg argument takes priority over tag in model name pretrained_cfg = pretrained_tag if not is_model(model_name): raise RuntimeError('Unknown model (%s)' % model_name) create_fn = model_entrypoint(model_name) with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): model = create_fn( pretrained=pretrained, pretrained_cfg=pretrained_cfg, pretrained_cfg_overlay=pretrained_cfg_overlay, **kwargs, ) if checkpoint_path: load_checkpoint(model, checkpoint_path) return model
pytorch-image-models/timm/models/_factory.py/0
{ "file_path": "pytorch-image-models/timm/models/_factory.py", "repo_id": "pytorch-image-models", "token_count": 1944 }
182
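Beyond the docstring examples, the helpers above define how model-name strings are parsed before creation. A short sketch of the accepted name forms (assuming `parse_model_name` and `safe_model_name` are re-exported from `timm.models`, and that the `resnet50.a1_in1k` pretrained tag exists in the installed timm version):

```py
import timm
from timm.models import parse_model_name, safe_model_name

print(parse_model_name('mobilenetv3_large_100'))         # ('timm', 'mobilenetv3_large_100')
print(parse_model_name('hf-hub:timm/resnet50.a1_in1k'))  # ('hf-hub', 'timm/resnet50.a1_in1k')
print(safe_model_name('hf-hub:timm/resnet50.a1_in1k'))   # 'timm_resnet50_a1_in1k'

# A '.tag' suffix selects a specific pretrained_cfg for the same architecture.
model = timm.create_model('resnet50.a1_in1k', pretrained=False)
```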
""" Bring-Your-Own-Blocks Network A flexible network w/ dataclass based config for stacking those NN blocks. This model is currently used to implement the following networks: GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 RepVGG - repvgg_* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT MobileOne - mobileone_* Paper: `MobileOne: An Improved One millisecond Mobile Backbone` - https://arxiv.org/abs/2206.04040 Code and weights: https://github.com/apple/ml-mobileone, licensed MIT In all cases the models have been modified to fit within the design of ByobNet. I've remapped the original weights and verified accuracies. For GPU Efficient nets, I used the original names for the blocks since they were for the most part the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. A significant number of different network archs can be implemented here, including variants of the above nets that include attention. Hacked together by / copyright Ross Wightman, 2021. """ import math from dataclasses import dataclass, field, replace from functools import partial from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, ConvNormAct, BatchNormAct2d, DropPath, AvgPool2dSame, \ create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple, EvoNorm2dS0a from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] @dataclass class ByoBlockCfg: type: Union[str, nn.Module] d: int # block depth (number of block repeats in stage) c: int # number of output channels for each block in stage s: int = 2 # stride of stage (first block) gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 br: float = 1. # bottleneck-ratio of blocks in stage # NOTE: these config items override the model cfgs that are applied to all blocks by default attn_layer: Optional[str] = None attn_kwargs: Optional[Dict[str, Any]] = None self_attn_layer: Optional[str] = None self_attn_kwargs: Optional[Dict[str, Any]] = None block_kwargs: Optional[Dict[str, Any]] = None @dataclass class ByoModelCfg: blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
downsample: str = 'conv1x1' stem_type: str = '3x3' stem_pool: Optional[str] = 'maxpool' stem_chs: int = 32 width_factor: float = 1.0 num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 zero_init_last: bool = True # zero init last weight (usually bn) in residual path fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation act_layer: str = 'relu' norm_layer: str = 'batchnorm' # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there attn_layer: Optional[str] = None attn_kwargs: dict = field(default_factory=lambda: dict()) self_attn_layer: Optional[str] = None self_attn_kwargs: dict = field(default_factory=lambda: dict()) block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): c = (64, 128, 256, 512) group_size = 0 if groups > 0: group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) return bcfg def _mobileone_bcfg(d=(2, 8, 10, 1), wf=(1., 1., 1., 1.), se_blocks=(), num_conv_branches=1): c = (64, 128, 256, 512) prev_c = min(64, c[0] * wf[0]) se_blocks = se_blocks or (0,) * len(d) bcfg = [] for d, c, w, se in zip(d, c, wf, se_blocks): scfg = [] for i in range(d): out_c = c * w bk = dict(num_conv_branches=num_conv_branches) ak = {} if i >= d - se: ak['attn_layer'] = 'se' scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] # depthwise block scfg += [ByoBlockCfg( type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] # pointwise block prev_c = out_c bcfg += [scfg] return bcfg def interleave_blocks( types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs, ) -> Tuple[ByoBlockCfg]: """ interleave 2 block types in stack """ assert len(types) == 2 if isinstance(every, int): every = list(range(0 if first else every, d, every + 1)) if not every: every = [d - 1] set(every) blocks = [] for i in range(d): block_type = types[1] if i in every else types[0] blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] return tuple(blocks) def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: if not isinstance(stage_blocks_cfg, Sequence): stage_blocks_cfg = (stage_blocks_cfg,) block_cfgs = [] for i, cfg in enumerate(stage_blocks_cfg): block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] return block_cfgs def num_groups(group_size, channels): if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size @dataclass class LayerFn: conv_norm_act: Callable = ConvNormAct norm_act: Callable = BatchNormAct2d act: Callable = nn.ReLU attn: Optional[Callable] = None self_attn: Optional[Callable] = None class DownsampleAvg(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1, apply_act: bool = False, layers: LayerFn = None, ): """ AvgPool Downsampling as in 'D' ResNet variants.""" super(DownsampleAvg, self).__init__() layers = layers or LayerFn() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = 
layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) def forward(self, x): return self.conv(self.pool(x)) def create_shortcut( downsample_type: str, in_chs: int, out_chs: int, stride: int, dilation: Tuple[int, int], layers: LayerFn, **kwargs, ): assert downsample_type in ('avg', 'conv1x1', '') if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: if not downsample_type: return None # no shortcut elif downsample_type == 'avg': return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) else: return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) else: return nn.Identity() # identity shortcut class BasicBlock(nn.Module): """ ResNet Basic Block - kxk + kxk """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), group_size: Optional[int] = None, bottle_ratio: float = 1.0, downsample: str = 'avg', attn_last: bool = True, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(BasicBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act( mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False, ) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.conv2_kxk(x) x = self.attn(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class BottleneckBlock(nn.Module): """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1., group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = False, linear_out: bool = False, extra_conv: bool = False, bottle_in: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(BottleneckBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.conv2_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) if extra_conv: self.conv2b_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups) else: self.conv2b_kxk = nn.Identity() self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.conv2b_kxk(x) x = self.attn(x) x = self.conv3_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class DarkBlock(nn.Module): """ DarkNet-like (1x1 + 3x3 w/ stride) block The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) for more optimal compute. 
""" def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = True, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(DarkBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act( mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, ) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class EdgeBlock(nn.Module): """ EdgeResidual-like (3x3 + 1x1) block A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. FIXME is there a more common 3x3 + 1x1 conv block to name this after? """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = False, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(EdgeBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_kxk = layers.conv_norm_act( in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class RepVggBlock(nn.Module): """ RepVGG Block. Adapted from impl at https://github.com/DingXiaoH/RepVGG """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = '', layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., inference_mode: bool = False ): super(RepVggBlock, self).__init__() self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d( in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True, ) else: self.reparam_conv = None use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None self.conv_kxk = layers.conv_norm_act( in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, ) self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): # NOTE this init overrides that base model init with specific changes for the block type for m in self.modules(): if isinstance(m, nn.BatchNorm2d): nn.init.normal_(m.weight, .1, .1) nn.init.normal_(m.bias, 0, .1) if hasattr(self.attn, 'reset_parameters'): self.attn.reset_parameters() def forward(self, x): if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) if self.identity is None: x = self.conv_1x1(x) + self.conv_kxk(x) else: identity = self.identity(x) x = self.conv_1x1(x) + self.conv_kxk(x) x = self.drop_path(x) # not in the paper / official impl, experimental x += identity x = self.attn(x) # no attn in the paper / official impl, experimental return self.act(x) def reparameterize(self): """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. 
""" if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = nn.Conv2d( in_channels=self.conv_kxk.conv.in_channels, out_channels=self.conv_kxk.conv.out_channels, kernel_size=self.conv_kxk.conv.kernel_size, stride=self.conv_kxk.conv.stride, padding=self.conv_kxk.conv.padding, dilation=self.conv_kxk.conv.dilation, groups=self.conv_kxk.conv.groups, bias=True, ) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_1x1') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 """ # get weights and bias of scale branch kernel_1x1 = 0 bias_1x1 = 0 if self.conv_1x1 is not None: kernel_1x1, bias_1x1 = self._fuse_bn_tensor(self.conv_1x1) # Pad scale branch kernel to match conv branch kernel size. pad = self.conv_kxk.conv.kernel_size[0] // 2 kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv, bias_conv = self._fuse_bn_tensor(self.conv_kxk) kernel_final = kernel_conv + kernel_1x1 + kernel_identity bias_final = bias_conv + bias_1x1 + bias_identity return kernel_final, bias_final def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to fuse batchnorm layer with preceeding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 """ if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk.conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk.conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk.conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class MobileOneBlock(nn.Module): """ MobileOne building block. This block has a multi-branched architecture at train-time and plain-CNN style architecture at inference time For more details, please refer to our paper: `An Improved One millisecond Mobile Backbone` - https://arxiv.org/pdf/2206.04040.pdf """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, # unused group_size: Optional[int] = None, downsample: str = '', # unused inference_mode: bool = False, num_conv_branches: int = 1, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ) -> None: """ Construct a MobileOneBlock module. 
""" super(MobileOneBlock, self).__init__() self.num_conv_branches = num_conv_branches self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d( in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True) else: self.reparam_conv = None # Re-parameterizable skip connection use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None # Re-parameterizable conv branches convs = [] for _ in range(self.num_conv_branches): convs.append(layers.conv_norm_act( in_chs, out_chs, kernel_size=kernel_size, stride=stride, groups=groups, apply_act=False)) self.conv_kxk = nn.ModuleList(convs) # Re-parameterizable scale branch self.conv_scale = None if kernel_size > 1: self.conv_scale = layers.conv_norm_act( in_chs, out_chs, kernel_size=1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Apply forward pass. """ # Inference mode forward pass. if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) # Multi-branched train-time forward pass. # Skip branch output identity_out = 0 if self.identity is not None: identity_out = self.identity(x) # Scale branch output scale_out = 0 if self.conv_scale is not None: scale_out = self.conv_scale(x) # Other branches out = scale_out for ck in self.conv_kxk: out += ck(x) out = self.drop_path(out) out += identity_out return self.act(self.attn(out)) def reparameterize(self): """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. """ if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = nn.Conv2d( in_channels=self.conv_kxk[0].conv.in_channels, out_channels=self.conv_kxk[0].conv.out_channels, kernel_size=self.conv_kxk[0].conv.kernel_size, stride=self.conv_kxk[0].conv.stride, padding=self.conv_kxk[0].conv.padding, dilation=self.conv_kxk[0].conv.dilation, groups=self.conv_kxk[0].conv.groups, bias=True) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_scale') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 """ # get weights and bias of scale branch kernel_scale = 0 bias_scale = 0 if self.conv_scale is not None: kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale) # Pad scale branch kernel to match conv branch kernel size. 
pad = self.conv_kxk[0].conv.kernel_size[0] // 2 kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv = 0 bias_conv = 0 for ix in range(self.num_conv_branches): _kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix]) kernel_conv += _kernel bias_conv += _bias kernel_final = kernel_conv + kernel_scale + kernel_identity bias_final = bias_conv + bias_scale + bias_identity return kernel_final, bias_final def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to fuse batchnorm layer with preceeding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 """ if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk[0].conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk[0].conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class SelfAttnBlock(nn.Module): """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1., group_size: Optional[int] = None, downsample: str = 'avg', extra_conv: bool = False, linear_out: bool = False, bottle_in: bool = False, post_attn_na: bool = True, feat_size: Optional[Tuple[int, int]] = None, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(SelfAttnBlock, self).__init__() assert layers is not None mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) if extra_conv: self.conv2_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) stride = 1 # striding done via conv if enabled else: self.conv2_kxk = nn.Identity() opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) # FIXME need to dilate self attn to have dilated network support, moop moop self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3_1x1.bn.weight) if hasattr(self.self_attn, 'reset_parameters'): self.self_attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.self_attn(x) x = self.post_attn(x) x = self.conv3_1x1(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) _block_registry = dict( basic=BasicBlock, bottle=BottleneckBlock, dark=DarkBlock, edge=EdgeBlock, rep=RepVggBlock, one=MobileOneBlock, self_attn=SelfAttnBlock, ) def register_block(block_type:str, block_fn: nn.Module): _block_registry[block_type] = block_fn def create_block(block: Union[str, nn.Module], **kwargs): if isinstance(block, (nn.Module, partial)): return block(**kwargs) assert block in _block_registry, f'Unknown block type ({block}' return _block_registry[block](**kwargs) class Stem(nn.Sequential): def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 4, pool: str = 'maxpool', num_rep: int = 3, num_act: Optional[int] = None, chs_decay: float = 0.5, layers: LayerFn = None, ): super().__init__() assert stride in (2, 4) layers = layers or LayerFn() if isinstance(out_chs, (list, tuple)): num_rep = len(out_chs) stem_chs = out_chs else: stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] self.stride = stride self.feature_info = [] # track intermediate features prev_feat = '' stem_strides = [2] + [1] * (num_rep - 1) if stride == 4 and not pool: # set last conv in stack to be strided if stride == 4 and no pooling layer stem_strides[-1] = 2 num_act = num_rep if num_act is None else num_act # if num_act < num_rep, first convs in stack won't have bn + act stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act prev_chs = in_chs curr_stride = 1 for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): layer_fn = layers.conv_norm_act if na else create_conv2d conv_name = f'conv{i + 1}' if i > 0 and s > 1: self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) prev_chs = ch curr_stride *= s prev_feat = conv_name if pool and 'max' in pool.lower(): self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) self.add_module('pool', nn.MaxPool2d(3, 2, 1)) curr_stride *= 2 prev_feat = 'pool' self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) assert curr_stride == stride def create_byob_stem( in_chs: int, out_chs: int, stem_type: str = '', pool_type: str = '', feat_prefix: str = 'stem', layers: LayerFn = None, ): layers = layers or LayerFn() assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3') if 'quad' in stem_type: # based on NFNet stem, stack of 4 3x3 convs num_act = 2 if 'quad2' in stem_type else None stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) elif 'tiered' in stem_type: # 3x3 stack of 3 convs as in my ResNet-T stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) elif 'deep' in stem_type: # 3x3 stack of 3 convs as in ResNet-D stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, 
layers=layers) elif 'rep' in stem_type: stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) elif 'one' in stem_type: stem = MobileOneBlock(in_chs, out_chs, kernel_size=3, stride=2, layers=layers) elif '7x7' in stem_type: # 7x7 stem conv as in ResNet if pool_type: stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) else: # 3x3 stem conv as in RegNet is the default if pool_type: stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) if isinstance(stem, Stem): feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] else: feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)] return stem, feature_info def reduce_feat_size(feat_size, stride=2): return None if feat_size is None else tuple([s // stride for s in feat_size]) def override_kwargs(block_kwargs, model_kwargs): """ Override model level attn/self-attn/block kwargs w/ block level NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs for the block if set to anything that isn't None. i.e. an empty block_kwargs dict will remove kwargs set at model level for that block """ out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs return out_kwargs or {} # make sure None isn't returned def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): layer_fns = block_kwargs['layers'] # override attn layer / args with block local config attn_set = block_cfg.attn_layer is not None if attn_set or block_cfg.attn_kwargs is not None: # override attn layer config if attn_set and not block_cfg.attn_layer: # empty string for attn_layer type will disable attn for this block attn_layer = None else: attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) attn_layer = block_cfg.attn_layer or model_cfg.attn_layer attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None layer_fns = replace(layer_fns, attn=attn_layer) # override self-attn layer / args with block local cfg self_attn_set = block_cfg.self_attn_layer is not None if self_attn_set or block_cfg.self_attn_kwargs is not None: # override attn layer config if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' # empty string for self_attn_layer type will disable attn for this block self_attn_layer = None else: self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ if self_attn_layer is not None else None layer_fns = replace(layer_fns, self_attn=self_attn_layer) block_kwargs['layers'] = layer_fns # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) def create_byob_stages( cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], feat_size: Optional[int] = None, layers: Optional[LayerFn] = None, block_kwargs_fn: Optional[Callable] = update_block_kwargs, ): layers = layers or LayerFn() feature_info = [] block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] dpr = [x.tolist() for x in torch.linspace(0, 
drop_path_rate, sum(depths)).split(depths)] dilation = 1 net_stride = stem_feat['reduction'] prev_chs = stem_feat['num_chs'] prev_feat = stem_feat stages = [] for stage_idx, stage_block_cfgs in enumerate(block_cfgs): stride = stage_block_cfgs[0].s if stride != 1 and prev_feat: feature_info.append(prev_feat) if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for block_idx, block_cfg in enumerate(stage_block_cfgs): out_chs = make_divisible(block_cfg.c * cfg.width_factor) group_size = block_cfg.gs if isinstance(group_size, Callable): group_size = group_size(out_chs, block_idx) block_kwargs = dict( # Blocks used in this model must accept these arguments in_chs=prev_chs, out_chs=out_chs, stride=stride if block_idx == 0 else 1, dilation=(first_dilation, dilation), group_size=group_size, bottle_ratio=block_cfg.br, downsample=cfg.downsample, drop_path_rate=dpr[stage_idx][block_idx], layers=layers, ) if block_cfg.type in ('self_attn',): # add feat_size arg for blocks that support/need it block_kwargs['feat_size'] = feat_size block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) blocks += [create_block(block_cfg.type, **block_kwargs)] first_dilation = dilation prev_chs = out_chs if stride > 1 and block_idx == 0: feat_size = reduce_feat_size(feat_size, stride) stages += [nn.Sequential(*blocks)] prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') feature_info.append(prev_feat) return nn.Sequential(*stages), feature_info def get_layer_fns(cfg: ByoModelCfg): act = get_act_layer(cfg.act_layer) norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act) conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act) attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) return layer_fn class ByobNet(nn.Module): """ 'Bring-your-own-blocks' Net A flexible network backbone that allows building model stem + blocks via dataclass cfg definition w/ factory functions for module instantiation. Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). """ def __init__( self, cfg: ByoModelCfg, num_classes: int = 1000, in_chans: int = 3, global_pool: str = 'avg', output_stride: int = 32, img_size: Optional[Union[int, Tuple[int, int]]] = None, drop_rate: float = 0., drop_path_rate: float =0., zero_init_last: bool = True, **kwargs, ): """ Args: cfg: Model architecture configuration. num_classes: Number of classifier classes. in_chans: Number of input channels. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). img_size: Image size for fixed image size models (i.e. self-attn). drop_rate: Classifier dropout rate. drop_path_rate: Stochastic depth drop-path rate. zero_init_last: Zero-init last weight of residual path. **kwargs: Extra kwargs overlayed onto cfg. 
""" super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg layers = get_layer_fns(cfg) if cfg.fixed_input_size: assert img_size is not None, 'img_size argument is required for fixed input size model' feat_size = to_2tuple(img_size) if img_size is not None else None self.feature_info = [] stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers) self.feature_info.extend(stem_feat[:-1]) feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) self.stages, stage_feat = create_byob_stages( cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size, ) self.feature_info.extend(stage_feat[:-1]) prev_chs = stage_feat[-1]['num_chs'] if cfg.num_features: self.num_features = int(round(cfg.width_factor * cfg.num_features)) self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1) else: self.num_features = prev_chs self.final_conv = nn.Identity() self.feature_info += [ dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')] self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) # init weights named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), (r'^final_conv', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name='', zero_init_last=False): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.BatchNorm2d): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights(zero_init_last=zero_init_last) model_cfgs = dict( gernet_l=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), ), stem_chs=32, stem_pool=None, num_features=2560, ), gernet_m=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), 
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), ), stem_chs=32, stem_pool=None, num_features=2560, ), gernet_s=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), ), stem_chs=13, stem_pool=None, num_features=1920, ), repvgg_a0=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 0.75, 0.75, 2.5)), stem_type='rep', stem_chs=48, ), repvgg_a1=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)), stem_type='rep', stem_chs=64, ), repvgg_a2=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), stem_type='rep', stem_chs=64, ), repvgg_b0=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), stem_type='rep', stem_chs=64, ), repvgg_b1=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), stem_type='rep', stem_chs=64, ), repvgg_b1g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_b2=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), stem_type='rep', stem_chs=64, ), repvgg_b2g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_b3=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), stem_type='rep', stem_chs=64, ), repvgg_b3g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_d2se=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.)), stem_type='rep', stem_chs=64, attn_layer='se', attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1), ), # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks # DW convs in last block, 2048 pre-FC, silu act resnet51q=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), ), stem_chs=128, stem_type='quad2', stem_pool=None, num_features=2048, act_layer='silu', ), # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act resnet61q=ByoModelCfg( blocks=( ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), ), stem_chs=128, stem_type='quad', stem_pool=None, num_features=2048, act_layer='silu', block_kwargs=dict(extra_conv=True), ), # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, # and a tiered stem w/ maxpool resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', ), gcresnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, 
s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca', ), seresnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='se', ), eca_resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca', ), bat_resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='bat', attn_kwargs=dict(block_size=8) ), # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool resnet32ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=0, act_layer='silu', ), # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool resnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', ), # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat # and a tiered stem w/ no maxpool gcresnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='gca', ), seresnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='se', ), eca_resnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), 
stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='eca', ), gcresnet50t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', attn_layer='gca', ), gcresnext50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca', ), # experimental models, closer to a RegNetZ than a ResNet. Similar to EfficientNets but w/ groups instead of DW regnetz_b16=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_c16=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d32=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d8=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_e8=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=2048, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), # experimental EvoNorm configs regnetz_b16_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, 
stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_c16_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d8_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), ), stem_chs=64, stem_type='deep', stem_pool='', downsample='', num_features=1792, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), mobileone_s0=ByoModelCfg( blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.), num_conv_branches=4), stem_type='one', stem_chs=48, ), mobileone_s1=ByoModelCfg( blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)), stem_type='one', stem_chs=64, ), mobileone_s2=ByoModelCfg( blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)), stem_type='one', stem_chs=64, ), mobileone_s3=ByoModelCfg( blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)), stem_type='one', stem_chs=64, ), mobileone_s4=ByoModelCfg( blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)), stem_type='one', stem_chs=64, ), ) def _create_byobnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } def _cfgr(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # GPU-Efficient (ResNet) weights 'gernet_s.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), # RepVGG weights 'repvgg_a0.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a1.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a2.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b0.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1g4.rvgg_in1k': _cfg( hf_hub_id='timm/', 
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_d2se.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, ), # experimental ResNet configs 'resnet51q.ra2_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet61q.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), # ResNeXt-26 models with different attention in Bottleneck blocks 'resnext26ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'bat_resnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', min_input_size=(3, 256, 256)), # ResNet-32 / 33 models with different attention in Bottleneck blocks 'resnet32ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet50t.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext50ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), # custom `timm` specific RegNetZ inspired models w/ different sizing from paper 'regnetz_b16.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'regnetz_c16.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_d32.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_e8.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_b16_evos.untrained': _cfgr( first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)), 'regnetz_c16_evos.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8_evos.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'mobileone_s0.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.875, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s1.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s2.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s3.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s4.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, 
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), }) @register_model def gernet_l(pretrained=False, **kwargs) -> ByobNet: """ GEResNet-Large (GENet-Large from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) @register_model def gernet_m(pretrained=False, **kwargs) -> ByobNet: """ GEResNet-Medium (GENet-Normal from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) @register_model def gernet_s(pretrained=False, **kwargs) -> ByobNet: """ EResNet-Small (GENet-Small from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) @register_model def repvgg_a0(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A0 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs) @register_model def repvgg_a1(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A1 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs) @register_model def repvgg_a2(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A2 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) @register_model def repvgg_b0(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B0 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) @register_model def repvgg_b1(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B1 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) @register_model def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B1g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b2(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B2 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) @register_model def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B2g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b3(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B3 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) @register_model def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B3g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) @register_model def repvgg_d2se(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-D2se `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs) @register_model def resnet51q(pretrained=False, **kwargs) 
-> ByobNet: """ """ return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) @register_model def resnet61q(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) @register_model def resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) @register_model def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) @register_model def seresnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) @register_model def eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) @register_model def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) @register_model def resnet32ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) @register_model def resnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) @register_model def seresnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) @register_model def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet50t(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) @register_model def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) @register_model def regnetz_b16(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) @register_model def regnetz_c16(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) @register_model def regnetz_d32(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) @register_model def regnetz_d8(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) @register_model def regnetz_e8(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) @register_model def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) @register_model def mobileone_s0(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs) @register_model def mobileone_s1(pretrained=False, **kwargs) -> ByobNet: """ """ return 
_create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs)


@register_model
def mobileone_s2(pretrained=False, **kwargs) -> ByobNet:
    """ MobileOne-S2 """
    return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs)


@register_model
def mobileone_s3(pretrained=False, **kwargs) -> ByobNet:
    """ MobileOne-S3 """
    return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs)


@register_model
def mobileone_s4(pretrained=False, **kwargs) -> ByobNet:
    """ MobileOne-S4 """
    return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs)
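
# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the upstream timm file):
# it shows how the block registry and model variants defined above are
# typically consumed. `resnext26ts`, `register_block` and `BasicBlock` are
# defined in this module; the 'my_basic' type name below is a hypothetical
# example of exposing a custom block to ByoBlockCfg.
if __name__ == '__main__':
    import torch

    # Build one of the ByobNet variants registered above and run a forward pass.
    model = resnext26ts(pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 256, 256))
    print(out.shape)  # expected: torch.Size([1, 10])

    # 'Bring your own block': expose an existing block class under a new type
    # name so it can be referenced from ByoBlockCfg(type='my_basic', ...) when
    # composing a custom ByoModelCfg.
    register_block('my_basic', BasicBlock)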
pytorch-image-models/timm/models/byobnet.py/0
{ "file_path": "pytorch-image-models/timm/models/byobnet.py", "repo_id": "pytorch-image-models", "token_count": 42793 }
183
""" The EfficientNet Family in PyTorch An implementation of EfficienNet that covers variety of related models with efficient architectures: * EfficientNet-V2 - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 * EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 * MixNet (Small, Medium, and Large) - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 * MNasNet B1, A1 (SE), Small - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 * FBNet-C - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 * Single-Path NAS Pixel1 - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 * TinyNet - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch * And likely more... The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing the models and weights open source! Hacked together by / Copyright 2019, Ross Wightman """ from functools import partial from typing import List import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, GroupNormAct from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['EfficientNet', 'EfficientNetFeatures'] class EfficientNet(nn.Module): """ EfficientNet A flexible and performant PyTorch implementation of efficient network architectures, including: * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 * EfficientNet B0-B8, L2 * EfficientNet-EdgeTPU * EfficientNet-CondConv * MixNet S, M, L, XL * MnasNet A1, B1, and small * MobileNet-V2 * FBNet C * Single-Path NAS Pixel1 * TinyNet """ def __init__( self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg' ): super(EfficientNet, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes 
self.num_features = num_features self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features head_chs = builder.in_chs # Head + Pooling self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) self.bn2 = norm_act_layer(self.num_features, inplace=True) self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) efficientnet_init_weights(self) def as_sequential(self): layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.conv_head, self.bn2, self.global_pool]) layers.extend([nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head|bn2', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) x = self.conv_head(x) x = self.bn2(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class EfficientNetFeatures(nn.Module): """ EfficientNet Feature Extractor A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation and object detection models. """ def __init__( self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0. 
): super(EfficientNetFeatures, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, feature_location=feature_location, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) # Register feature extraction hooks with FeatureHooks helper self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x) -> List[torch.Tensor]: x = self.conv_stem(x) x = self.bn1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) # add stem out for i, b in enumerate(self.blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_effnet(variant, pretrained=False, **kwargs): features_mode = '' model_cls = EfficientNet kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') model_cls = EfficientNetFeatures features_mode = 'cls' model = build_model_with_cfg( model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs, ) if features_mode == 'cls': model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg) return model def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-a1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_noskip'], # stage 1, 112x112 in ['ir_r2_k3_s2_e6_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25'], # stage 3, 28x28 in ['ir_r4_k3_s2_e6_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40'], # stage 3, 28x28 in ['ir_r3_k5_s2_e6_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c96'], # stage 5, 14x14in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ ['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=8, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v2( variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): """ Generate MobileNet-V2 network Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py Paper: https://arxiv.org/abs/1801.04381 """ arch_def = [ ['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k3_s2_e6_c32'], ['ir_r4_k3_s2_e6_c64'], ['ir_r3_k3_s1_e6_c96'], ['ir_r3_k3_s2_e6_c160'], ['ir_r1_k3_s1_e6_c320'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """ FBNet-C Paper: https://arxiv.org/abs/1812.03443 Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, it was used to confirm some building block details """ arch_def = [ ['ir_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], ['ir_r4_k5_s2_e6_c184'], ['ir_r1_k3_s1_e6_c352'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=16, num_features=1984, # paper suggests this, but is not 100% clear round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates the Single-Path NAS model from search targeted for Pixel1 phone. Paper: https://arxiv.org/abs/1904.02877 Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], # stage 3, 28x28 in ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], # stage 4, 14x14in ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], # stage 5, 14x14in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, pretrained=False, **kwargs): """Creates an EfficientNet model. Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-b0': (1.0, 1.0, 224, 0.2), 'efficientnet-b1': (1.0, 1.1, 240, 0.2), 'efficientnet-b2': (1.1, 1.2, 260, 0.3), 'efficientnet-b3': (1.2, 1.4, 300, 0.3), 'efficientnet-b4': (1.4, 1.8, 380, 0.4), 'efficientnet-b5': (1.6, 2.2, 456, 0.4), 'efficientnet-b6': (1.8, 2.6, 528, 0.5), 'efficientnet-b7': (2.0, 3.1, 600, 0.5), 'efficientnet-b8': (2.2, 3.6, 672, 0.5), 'efficientnet-l2': (4.3, 5.3, 800, 0.5), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_edge( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): """ Creates an EfficientNet-EdgeTPU model Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu """ arch_def = [ # NOTE `fc` is present to override a mismatch between stem channels and in chs not # present in other models ['er_r1_k3_s1_e4_c24_fc24_noskip'], ['er_r2_k3_s2_e8_c32'], ['er_r4_k3_s2_e8_c48'], ['ir_r5_k5_s2_e8_c96'], ['ir_r4_k5_s1_e8_c144'], ['ir_r2_k5_s2_e8_c192'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_condconv( variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, 
pretrained=False, **kwargs): """Creates an EfficientNet-CondConv model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], ] # NOTE unlike official impl, this one uses `cc<x>` option where x is the base number of experts for each stage and # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'swish'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates an EfficientNet-Lite model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ arch_def = [ ['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40'], ['ir_r3_k3_s2_e6_c80'], ['ir_r3_k5_s1_e6_c112'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), num_features=1280, stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), act_layer=resolve_act_layer(kwargs, 'relu6'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_base( variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 base model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r1_k3_s1_e1_c16_skip'], ['er_r2_k3_s2_e4_c32'], ['er_r2_k3_s2_e4_c48'], ['ir_r3_k3_s2_e4_c96_se0.25'], ['ir_r5_k3_s1_e6_c112_se0.25'], ['ir_r8_k3_s2_e6_c192_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
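    # Added note (not in the original source): round_limit=0. switches off round_channels'
    # guard against rounding a width down by more than ~10% of its scaled value, so with
    # channel_multiplier=1.0 the head below resolves to round_chs_fn(1280) == 1280 exactly.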
model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_s( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Small model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, before ref the impl was released. """ arch_def = [ ['cn_r2_k3_s1_e1_c24_skip'], ['er_r4_k3_s2_e4_c48'], ['er_r4_k3_s2_e4_c64'], ['ir_r6_k3_s2_e4_c128_se0.25'], ['ir_r9_k3_s1_e6_c160_se0.25'], ['ir_r15_k3_s2_e6_c256_se0.25'], ] num_features = 1280 if rw: # my original variant, based on paper figure differs from the official release arch_def[0] = ['er_r2_k3_s1_e1_c24'] arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] num_features = 1792 round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(num_features), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Medium model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r3_k3_s1_e1_c24_skip'], ['er_r5_k3_s2_e4_c48'], ['er_r5_k3_s2_e4_c80'], ['ir_r7_k3_s2_e4_c160_se0.25'], ['ir_r14_k3_s1_e6_c176_se0.25'], ['ir_r18_k3_s2_e6_c304_se0.25'], ['ir_r5_k3_s1_e6_c512_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=1280, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Large model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r4_k3_s1_e1_c32_skip'], ['er_r7_k3_s2_e4_c64'], ['er_r7_k3_s2_e4_c96'], ['ir_r10_k3_s2_e4_c192_se0.25'], ['ir_r19_k3_s1_e6_c224_se0.25'], ['ir_r25_k3_s2_e6_c384_se0.25'], ['ir_r7_k3_s1_e6_c640_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) 
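    # Added note (not in the original source): the norm_layer expression above lets callers pass
    # their own norm layer via kwargs; otherwise BatchNorm2d is used, with any bn_eps/bn_momentum
    # overrides folded in by resolve_bn_args(kwargs).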
model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Xtra-Large model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r4_k3_s1_e1_c32_skip'], ['er_r8_k3_s2_e4_c64'], ['er_r8_k3_s2_e4_c96'], ['ir_r16_k3_s2_e4_c192_se0.25'], ['ir_r24_k3_s1_e6_c256_se0.25'], ['ir_r32_k3_s2_e6_c512_se0.25'], ['ir_r8_k3_s1_e6_c640_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a MixNet Small model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet Paper: https://arxiv.org/abs/1907.09595 """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16'], # relu # stage 1, 112x112 in ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu # stage 2, 56x56 in ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish # stage 3, 28x28 in ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish # stage 4, 14x14in ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish # stage 5, 14x14in ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=1536, stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates a MixNet Medium-Large model. 
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet Paper: https://arxiv.org/abs/1907.09595 """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c24'], # relu # stage 1, 112x112 in ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu # stage 2, 56x56 in ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish # stage 3, 28x28 in ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish # stage 4, 14x14in ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish # stage 5, 14x14in ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=1536, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_tinynet( variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs ): """Creates a TinyNet model. """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=max(1280, round_channels(1280, model_width, 8, None)), stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=model_width), act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'mnasnet_050.untrained': _cfg(), 'mnasnet_075.untrained': _cfg(), 'mnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', hf_hub_id='timm/'), 'mnasnet_140.untrained': _cfg(), 'semnasnet_050.untrained': _cfg(), 'semnasnet_075.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth', hf_hub_id='timm/'), 'semnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', hf_hub_id='timm/'), 'semnasnet_140.untrained': _cfg(), 'mnasnet_small.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth', hf_hub_id='timm/'), 'mobilenetv2_035.untrained': _cfg(), 'mobilenetv2_050.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', hf_hub_id='timm/', interpolation='bicubic', ), 'mobilenetv2_075.untrained': _cfg(), 'mobilenetv2_100.ra_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', hf_hub_id='timm/'), 'mobilenetv2_110d.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', hf_hub_id='timm/'), 'mobilenetv2_120d.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', hf_hub_id='timm/'), 'mobilenetv2_140.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', hf_hub_id='timm/'), 'fbnetc_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', hf_hub_id='timm/', interpolation='bilinear'), 'spnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', hf_hub_id='timm/', interpolation='bilinear'), # NOTE experimenting with alternate attention 'efficientnet_b0.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', hf_hub_id='timm/'), 'efficientnet_b1.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=1.0), 'efficientnet_b2.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), 'efficientnet_b3.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_b4.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), 'efficientnet_b5.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'), 'efficientnet_b5.sw_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821), 'efficientnet_b6.untrained': _cfg( url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'efficientnet_b7.untrained': _cfg( url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'efficientnet_b8.untrained': _cfg( url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'efficientnet_l2.untrained': _cfg( url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), # FIXME experimental 'efficientnet_b0_gn.untrained': _cfg(), 'efficientnet_b0_g8_gn.untrained': _cfg(), 'efficientnet_b0_g16_evos.untrained': _cfg(), 'efficientnet_b3_gn.untrained': _cfg( input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_b3_g8_gn.untrained': _cfg( input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_es.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', hf_hub_id='timm/'), 
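    # Illustrative usage sketch (added, not part of the original file; assumes a timm release that
    # understands the tag-qualified names used in this table):
    #   import timm
    #   m = timm.create_model('efficientnet_b3.ra2_in1k', pretrained=True)
    #   print(m.pretrained_cfg['input_size'])       # (3, 288, 288), per the entry above
    #   print(m.pretrained_cfg['test_input_size'])  # (3, 320, 320)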
'efficientnet_em.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_el.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_es_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth', hf_hub_id='timm/'), 'efficientnet_el_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_cc_b0_4e.untrained': _cfg(), 'efficientnet_cc_b0_8e.untrained': _cfg(), 'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite0.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', hf_hub_id='timm/'), 'efficientnet_lite1.untrained': _cfg( input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite2.untrained': _cfg( input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'efficientnet_lite3.untrained': _cfg( input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_lite4.untrained': _cfg( input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'efficientnet_b1_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b2_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b3_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnetv2_rw_t.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'gc_efficientnetv2_rw_t.agc_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'efficientnetv2_rw_s.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', hf_hub_id='timm/', input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_rw_m.agc_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', hf_hub_id='timm/', 
input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_s.untrained': _cfg( input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_m.untrained': _cfg( input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_l.untrained': _cfg( input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), 'efficientnetv2_xl.untrained': _cfg( input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), 'tf_efficientnet_b0.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_l2.ns_jft_in1k_475': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', hf_hub_id='timm/', input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), 'tf_efficientnet_l2.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', hf_hub_id='timm/', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), 'tf_efficientnet_b0.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), 'tf_efficientnet_b1.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), 
pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b5.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b7.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', hf_hub_id='timm/', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b0.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 
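    # Added note (not in the original file): the pretrained tags on the tf_efficientnet entries
    # follow the upstream training recipes -- 'ns_jft' = Noisy Student (JFT-300M), 'ap' = AdvProp,
    # 'aa' = the original AutoAugment checkpoints -- while 'ra'/'ra2' above are timm RandAugment runs.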
'tf_efficientnet_b3.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.aa_in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.aa_in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b0.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_es.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), ), 'tf_efficientnet_em.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_el.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_cc_b0_4e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', 
hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b0_8e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b1_8e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_lite0.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite1.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite2.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite3.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), 'tf_efficientnet_lite4.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), 'tf_efficientnetv2_s.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', 
hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_b0.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), 'tf_efficientnetv2_b1.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), 'tf_efficientnetv2_b2.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', hf_hub_id='timm/', input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), 'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), test_input_size=(3, 
300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'), 'tf_efficientnetv2_b3.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'tf_efficientnetv2_b3.in21k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'mixnet_s.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', hf_hub_id='timm/'), 'mixnet_m.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', hf_hub_id='timm/'), 'mixnet_l.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', hf_hub_id='timm/'), 'mixnet_xl.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', hf_hub_id='timm/'), 'mixnet_xxl.untrained': _cfg(), 'tf_mixnet_s.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', hf_hub_id='timm/'), 'tf_mixnet_m.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', hf_hub_id='timm/'), 'tf_mixnet_l.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth', hf_hub_id='timm/'), "tinynet_a.in1k": _cfg( input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth', hf_hub_id='timm/'), "tinynet_b.in1k": _cfg( input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth', hf_hub_id='timm/'), "tinynet_c.in1k": _cfg( input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth', hf_hub_id='timm/'), "tinynet_d.in1k": _cfg( input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth', hf_hub_id='timm/'), "tinynet_e.in1k": _cfg( input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth', hf_hub_id='timm/'), }) @register_model def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 0.5. """ model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 0.75. """ model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.0. 
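    Example (illustrative, not from the original source; assumes timm is installed and uses the
    registered entrypoint rather than calling this function directly):

        import timm
        model = timm.create_model('mnasnet_100', pretrained=True).eval()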
""" model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.4 """ model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet Small, depth multiplier of 1.0. """ model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.35 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.5 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.75 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.0 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.4 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" model = _gen_mobilenet_v2( 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ model = _gen_mobilenet_v2( 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet: """ FBNet-C """ if pretrained: # pretrained model trained with non-default BN epsilon kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ Single-Path NAS Pixel1""" model = _gen_spnasnet('spnasnet_100', 1.0, 
pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2.""" # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model # FIXME experimental group cong / GroupNorm / EvoNorm experiments @register_model def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def 
efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B0 w/ group conv + GroupNorm"""
    model = _gen_efficientnet(
        'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8),
        pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B0 w/ group 16 conv + EvoNorm"""
    model = _gen_efficientnet(
        'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16,
        pretrained=pretrained, **kwargs)  # norm_layer=partial(EvoNorm2dS0, group_size=16),
    return model


@register_model
def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B3 w/ GroupNorm """
    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
    model = _gen_efficientnet(
        'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16,
        norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B3 w/ group conv + GroupNorm"""
    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
    model = _gen_efficientnet(
        'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8,
        channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16),
        pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Edge Small. """
    model = _gen_efficientnet_edge(
        'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
    model = _gen_efficientnet_edge(
        'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Edge-Medium. """
    model = _gen_efficientnet_edge(
        'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Edge-Large. """
    model = _gen_efficientnet_edge(
        'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
    model = _gen_efficientnet_edge(
        'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-CondConv-B0 w/ 4 Experts """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet_condconv(
        'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-CondConv-B0 w/ 8 Experts """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet_condconv(
        'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
        pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-CondConv-B1 w/ 8 Experts """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet_condconv(
        'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
        pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Lite0 """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet_lite(
        'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Lite1 """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet_lite(
        'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Lite2 """
    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
    model = _gen_efficientnet_lite(
        'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Lite3 """
    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
    model = _gen_efficientnet_lite(
        'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-Lite4 """
    # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
    model = _gen_efficientnet_lite(
        'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B1 Pruned.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') variant = 'efficientnet_b1_pruned' model = _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) return model @register_model def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, se_layer='gc', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small (RW variant). NOTE: This is my initial (pre official code release) w/ some differences. See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding """ model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium (RW variant). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. """ model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. """ model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. """ model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. """ model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2 NoisyStudent. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B0. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B1. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B2. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B3. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. """ model = _gen_mixnet_s( 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. """ model = _gen_mixnet_m( 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. """ model = _gen_mixnet_m( 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Extra-Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Double Extra Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_s( 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m( 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m( 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tinynet_a(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) return model @register_model def tinynet_b(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) return model @register_model def tinynet_c(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) return model @register_model def tinynet_d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) return model @register_model def tinynet_e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, { 'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k', 'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k', 'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k', 'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k', 'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k', 'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k', 'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k', 'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k', 'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k', 'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k', 'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k', 'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k', 'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k', 'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k', 'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k', 'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k', 'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k', 'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475', 'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k', 'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k', 'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k', 'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k', 'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k', 'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k', 'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k', 'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k', 'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k', 'efficientnet_b2a': 'efficientnet_b2', 'efficientnet_b3a': 'efficientnet_b3', 'mnasnet_a1': 'semnasnet_100', 'mnasnet_b1': 'mnasnet_100', })
pytorch-image-models/timm/models/efficientnet.py/0
{ "file_path": "pytorch-image-models/timm/models/efficientnet.py", "repo_id": "pytorch-image-models", "token_count": 47473 }
184
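A minimal usage sketch for the EfficientNet/MixNet/TinyNet entrypoints registered above via @register_model. This is an illustrative example rather than part of the original file: it assumes a working timm install and picks 'efficientnet_lite0' arbitrarily; any other registered name (including the deprecated aliases remapped by register_model_deprecations above) resolves the same way.

import torch
import timm

# Registered names can be discovered with a wildcard filter.
print(timm.list_models('tf_efficientnetv2_*'))

# create_model dispatches to the @register_model entrypoints defined in this file.
model = timm.create_model('efficientnet_lite0', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)  # dummy input batch
with torch.no_grad():
    logits = model(x)  # shape: (1, 10)
print(logits.shape)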
""" Pytorch Inception-Resnet-V2 implementation Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionResnetV2'] class Mixed_5b(nn.Module): def __init__(self, conv_block=None): super(Mixed_5b, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(192, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(192, 48, kernel_size=1, stride=1), conv_block(48, 64, kernel_size=5, stride=1, padding=2) ) self.branch2 = nn.Sequential( conv_block(192, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(192, 64, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block35, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 32, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 48, kernel_size=3, stride=1, padding=1), conv_block(48, 64, kernel_size=3, stride=1, padding=1) ) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_6a(nn.Module): def __init__(self, conv_block=None): super(Mixed_6a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( conv_block(320, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=3, stride=1, padding=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block17, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(1088, 128, kernel_size=1, stride=1), conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) ) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class 
Mixed_7a(nn.Module): def __init__(self, conv_block=None): super(Mixed_7a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=2) ) self.branch2 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=1, padding=1), conv_block(288, 320, kernel_size=3, stride=2) ) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, no_relu=False, conv_block=None): super(Block8, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(2080, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) ) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) self.relu = None if no_relu else nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if self.relu is not None: out = self.relu(out) return out class InceptionResnetV2(nn.Module): def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg', norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionResnetV2, self).__init__() self.num_classes = num_classes self.num_features = 1536 assert output_stride == 32 conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b(conv_block=conv_block) self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] self.mixed_6a = Mixed_6a(conv_block=conv_block) self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)]) self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] self.mixed_7a = Mixed_7a(conv_block=conv_block) self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)]) self.block8 = Block8(no_relu=True, conv_block=conv_block) self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] self.global_pool, self.head_drop, self.classif = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, 
coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('classif',)) def _matcher(name): if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): return 1 elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): return len(module_map) + 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.classif def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classif(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) default_cfgs = generate_default_cfgs({ # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz 'inception_resnet_v2.tf_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', }, # As per https://arxiv.org/abs/1705.07204 and # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz 'inception_resnet_v2.tf_ens_adv_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', } }) @register_model def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k', })
pytorch-image-models/timm/models/inception_resnet_v2.py/0
{ "file_path": "pytorch-image-models/timm/models/inception_resnet_v2.py", "repo_id": "pytorch-image-models", "token_count": 6015 }
185
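A short, hedged sketch (not part of the original file) exercising the InceptionResnetV2 entrypoint defined above. It assumes timm is installed; the 299x299 input matches the default config, and forward_features / forward_head(pre_logits=True) come directly from the class definition.

import torch
import timm

model = timm.create_model('inception_resnet_v2', pretrained=False)
model.eval()

x = torch.randn(2, 3, 299, 299)
with torch.no_grad():
    logits = model(x)                                    # (2, 1000) class logits
    fmap = model.forward_features(x)                     # (2, 1536, 8, 8) final conv feature map
    pooled = model.forward_head(fmap, pre_logits=True)   # (2, 1536) pooled embedding before classifier
print(logits.shape, fmap.shape, pooled.shape)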
""" Pyramid Vision Transformer v2 @misc{wang2021pvtv2, title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, author={Wenhai Wang and Enze Xie and Xiang Li and Deng-Ping Fan and Kaitao Song and Ding Liang and Tong Lu and Ping Luo and Ling Shao}, year={2021}, eprint={2106.13797}, archivePrefix={arXiv}, primaryClass={cs.CV} } Based on Apache 2.0 licensed code at https://github.com/whai362/PVT Modifications and timm support by / Copyright 2022, Ross Wightman """ import math from typing import Tuple, List, Callable, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, to_ntuple, trunc_normal_, LayerNorm, use_fused_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['PyramidVisionTransformerV2'] class MlpWithDepthwiseConv(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., extra_relu=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.relu = nn.ReLU() if extra_relu else nn.Identity() self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x, feat_size: List[int]): x = self.fc1(x) B, N, C = x.shape x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1]) x = self.relu(x) x = self.dwconv(x) x = x.flatten(2).transpose(1, 2) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim, num_heads=8, sr_ratio=1, linear_attn=False, qkv_bias=True, attn_drop=0., proj_drop=0. ): super().__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.dim = dim self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=qkv_bias) self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) if not linear_attn: self.pool = None if sr_ratio > 1: self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) self.norm = nn.LayerNorm(dim) else: self.sr = None self.norm = None self.act = None else: self.pool = nn.AdaptiveAvgPool2d(7) self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1) self.norm = nn.LayerNorm(dim) self.act = nn.GELU() def forward(self, x, feat_size: List[int]): B, N, C = x.shape H, W = feat_size q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) if self.pool is not None: x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.sr(self.pool(x)).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) x = self.act(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) else: if self.sr is not None: x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) else: kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., sr_ratio=1, linear_attn=False, qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, sr_ratio=sr_ratio, linear_attn=linear_attn, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = MlpWithDepthwiseConv( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, extra_relu=linear_attn, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x, feat_size: List[int]): x = x + self.drop_path1(self.attn(self.norm1(x), feat_size)) x = x + self.drop_path2(self.mlp(self.norm2(x), feat_size)) return x class OverlapPatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768): super().__init__() patch_size = to_2tuple(patch_size) assert max(patch_size) > stride, "Set larger patch_size than stride" self.patch_size = patch_size self.proj = nn.Conv2d( in_chans, embed_dim, patch_size, stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2)) self.norm = nn.LayerNorm(embed_dim) def forward(self, x): x = self.proj(x) x = x.permute(0, 2, 3, 1) x = self.norm(x) return x class PyramidVisionTransformerStage(nn.Module): def __init__( self, dim: int, dim_out: int, depth: int, downsample: bool = True, num_heads: int = 8, sr_ratio: int = 1, linear_attn: bool = False, mlp_ratio: float = 4.0, qkv_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., drop_path: Union[List[float], float] = 0.0, norm_layer: Callable = LayerNorm, ): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = OverlapPatchEmbed( patch_size=3, stride=2, in_chans=dim, embed_dim=dim_out, ) else: assert dim == dim_out self.downsample = None self.blocks = nn.ModuleList([Block( dim=dim_out, num_heads=num_heads, sr_ratio=sr_ratio, linear_attn=linear_attn, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, ) for i in range(depth)]) self.norm = norm_layer(dim_out) def forward(self, x): # x is either B, C, H, W (if downsample) or B, H, W, C if not if self.downsample is not None: # input to downsample is B, C, H, W x = self.downsample(x) # output B, H, W, C B, H, W, C = x.shape feat_size = (H, W) x = x.reshape(B, -1, C) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x, feat_size) else: x = blk(x, feat_size) x = self.norm(x) x = x.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous() return x class PyramidVisionTransformerV2(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', depths=(3, 4, 6, 3), embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), sr_ratios=(8, 4, 2, 1), mlp_ratios=(8., 8., 4., 4.), qkv_bias=True, linear=False, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm, ): super().__init__() self.num_classes = num_classes assert global_pool in ('avg', '') self.global_pool = global_pool self.depths = depths num_stages = len(depths) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) num_heads = to_ntuple(num_stages)(num_heads) sr_ratios = to_ntuple(num_stages)(sr_ratios) assert(len(embed_dims)) == num_stages self.feature_info = [] self.patch_embed = OverlapPatchEmbed( patch_size=7, stride=4, in_chans=in_chans, embed_dim=embed_dims[0], ) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] cur = 0 prev_dim = embed_dims[0] stages = [] for i in range(num_stages): stages += [PyramidVisionTransformerStage( dim=prev_dim, dim_out=embed_dims[i], depth=depths[i], downsample=i > 0, num_heads=num_heads[i], sr_ratio=sr_ratios[i], mlp_ratio=mlp_ratios[i], linear_attn=linear, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, )] prev_dim = embed_dims[i] cur += depths[i] 
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * 2**i, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) # classification head self.num_features = embed_dims[-1] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def freeze_patch_emb(self): self.patch_embed.requires_grad = False @torch.jit.ignore def no_weight_decay(self): return {} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=r'^stages\.(\d+)' ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('avg', '') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x.mean(dim=(-1, -2)) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'patch_embed.proj.weight' in state_dict: return state_dict # non-original checkpoint, no remapping needed out_dict = {} import re for k, v in state_dict.items(): if k.startswith('patch_embed'): k = k.replace('patch_embed1', 'patch_embed') k = k.replace('patch_embed2', 'stages.1.downsample') k = k.replace('patch_embed3', 'stages.2.downsample') k = k.replace('patch_embed4', 'stages.3.downsample') k = k.replace('dwconv.dwconv', 'dwconv') k = re.sub(r'block(\d+).(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k) k = re.sub(r'^norm(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k) out_dict[k] = v return out_dict def _create_pvt2(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(4)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( PyramidVisionTransformerV2, variant, pretrained, pretrained_filter_fn=_checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'fixed_input_size': False, **kwargs } default_cfgs = generate_default_cfgs({ 'pvt_v2_b0.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b1.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b2.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b3.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b4.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b5.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b2_li.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def 
pvt_v2_b0(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b1(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b2(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b3(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b4(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b5(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict( depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), mlp_ratios=(4, 4, 4, 4)) return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b2_li(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict( depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), linear=True) return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/pvt_v2.py/0
{ "file_path": "pytorch-image-models/timm/models/pvt_v2.py", "repo_id": "pytorch-image-models", "token_count": 9047 }
186
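An illustrative, hedged sketch (not part of the original file) showing the PVTv2 entrypoints above used as a multi-scale feature backbone. _create_pvt2 wires feature_cfg with out_indices 0-3, so features_only=True returns pyramid feature maps at strides 4/8/16/32; 'pvt_v2_b0' is an arbitrary choice and a timm install is assumed.

import torch
import timm

# Request only the last three stages (strides 8, 16, 32).
backbone = timm.create_model('pvt_v2_b0', pretrained=False, features_only=True, out_indices=(1, 2, 3))
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    feats = backbone(x)
for f in feats:
    # for pvt_v2_b0: (1, 64, 28, 28), (1, 160, 14, 14), (1, 256, 7, 7)
    print(f.shape)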
""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/pdf/2111.09883 Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below This implementation is experimental and subject to change in manners that will break weight compat: * Size of the pos embed MLP are not spelled out in paper in terms of dim, fixed for all models? vary with num_heads? * currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head) * The specifics of the memory saving 'sequential attention' are not detailed, Christoph Reich has an impl at GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial. * num_heads per stage is not detailed for Huge and Giant model variants * 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts * experiments are ongoing wrt to 'main branch' norm layer use and weight init scheme Noteworthy additions over official Swin v1: * MLP relative position embedding is looking promising and adapts to different image/window sizes * This impl has been designed to allow easy change of image size with matching window size changes * Non-square image size and window size are supported Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # -------------------------------------------------------- # Swin Transformer V2 reimplementation # Copyright (c) 2021 Christoph Reich # Licensed under The MIT License [see LICENSE for details] # Written by Christoph Reich # -------------------------------------------------------- import logging import math from typing import Tuple, Optional, List, Union, Any, Type import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert, ndgrid from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import named_apply from ._registry import generate_default_cfgs, register_model __all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor: """Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """ return x.permute(0, 2, 3, 1) def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor: """Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). 
""" return x.permute(0, 3, 1, 2) def window_partition(x, window_size: Tuple[int, int]): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): """ Args: windows: (num_windows * B, window_size[0], window_size[1], C) window_size (Tuple[int, int]): Window size img_size (Tuple[int, int]): Image size Returns: x: (B, H, W, C) """ H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowMultiHeadAttention(nn.Module): r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias. Args: dim (int): Number of input features window_size (int): Window size num_heads (int): Number of attention heads drop_attn (float): Dropout rate of attention map drop_proj (float): Dropout rate after projection meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network sequential_attn (bool): If true sequential self-attention is performed """ def __init__( self, dim: int, num_heads: int, window_size: Tuple[int, int], drop_attn: float = 0.0, drop_proj: float = 0.0, meta_hidden_dim: int = 384, # FIXME what's the optimal value? sequential_attn: bool = False, ) -> None: super(WindowMultiHeadAttention, self).__init__() assert dim % num_heads == 0, \ "The number of input features (in_features) are not divisible by the number of heads (num_heads)." self.in_features: int = dim self.window_size: Tuple[int, int] = window_size self.num_heads: int = num_heads self.sequential_attn: bool = sequential_attn self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True) self.attn_drop = nn.Dropout(drop_attn) self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True) self.proj_drop = nn.Dropout(drop_proj) # meta network for positional encodings self.meta_mlp = Mlp( 2, # x, y hidden_features=meta_hidden_dim, out_features=num_heads, act_layer=nn.ReLU, drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without? 
) # NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads))) self._make_pair_wise_relative_positions() def _make_pair_wise_relative_positions(self) -> None: """Method initializes the pair-wise relative positions to compute the positional biases.""" device = self.logit_scale.device coordinates = torch.stack(ndgrid( torch.arange(self.window_size[0], device=device), torch.arange(self.window_size[1], device=device) ), dim=0).flatten(1) relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :] relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float() relative_coordinates_log = torch.sign(relative_coordinates) * torch.log( 1.0 + relative_coordinates.abs()) self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False) def update_input_size(self, new_window_size: int, **kwargs: Any) -> None: """Method updates the window size and so the pair-wise relative positions Args: new_window_size (int): New window size kwargs (Any): Unused """ # Set new window size and new pair-wise relative positions self.window_size: int = new_window_size self._make_pair_wise_relative_positions() def _relative_positional_encodings(self) -> torch.Tensor: """Method computes the relative positional encodings Returns: relative_position_bias (torch.Tensor): Relative positional encodings (1, number of heads, window size ** 2, window size ** 2) """ window_area = self.window_size[0] * self.window_size[1] relative_position_bias = self.meta_mlp(self.relative_coordinates_log) relative_position_bias = relative_position_bias.transpose(1, 0).reshape( self.num_heads, window_area, window_area ) relative_position_bias = relative_position_bias.unsqueeze(0) return relative_position_bias def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: """ Forward pass. Args: x (torch.Tensor): Input tensor of the shape (B * windows, N, C) mask (Optional[torch.Tensor]): Attention mask for the shift case Returns: Output tensor of the shape [B * windows, N, C] """ Bw, L, C = x.shape qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) query, key, value = qkv.unbind(0) # compute attention map with scaled cosine attention attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1)) logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp() attn = attn * logit_scale attn = attn + self._relative_positional_encodings() if mask is not None: # Apply mask if utilized num_win: int = mask.shape[0] attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L) attn = attn + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, L, L) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2CrBlock(nn.Module): r"""This class implements the Swin transformer block. 
Args: dim (int): Number of input channels num_heads (int): Number of attention heads to be utilized feat_size (Tuple[int, int]): Input resolution window_size (Tuple[int, int]): Window size to be utilized shift_size (int): Shifting size to be used mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels proj_drop (float): Dropout in input mapping drop_attn (float): Dropout rate of attention map drop_path (float): Dropout in main path extra_norm (bool): Insert extra norm on 'main' branch if True sequential_attn (bool): If true sequential self-attention is performed norm_layer (Type[nn.Module]): Type of normalization layer to be utilized """ def __init__( self, dim: int, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], shift_size: Tuple[int, int] = (0, 0), mlp_ratio: float = 4.0, init_values: Optional[float] = 0, proj_drop: float = 0.0, drop_attn: float = 0.0, drop_path: float = 0.0, extra_norm: bool = False, sequential_attn: bool = False, norm_layer: Type[nn.Module] = nn.LayerNorm, ) -> None: super(SwinTransformerV2CrBlock, self).__init__() self.dim: int = dim self.feat_size: Tuple[int, int] = feat_size self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size) self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.init_values: Optional[float] = init_values # attn branch self.attn = WindowMultiHeadAttention( dim=dim, num_heads=num_heads, window_size=self.window_size, drop_attn=drop_attn, drop_proj=proj_drop, sequential_attn=sequential_attn, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() # mlp branch self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), drop=proj_drop, out_features=dim, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() # Extra main branch norm layer mentioned for Huge/Giant models in V2 paper. # Also being used as final network norm and optional stage ending norm while still in a C-last format. 
self.norm3 = norm_layer(dim) if extra_norm else nn.Identity() self._make_attention_mask() self.init_weights() def _calc_window_shift(self, target_window_size): window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)] shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, self.target_shift_size)] return tuple(window_size), tuple(shift_size) def _make_attention_mask(self) -> None: """Method generates the attention mask used in shift case.""" # Make masks for shift case if any(self.shift_size): # calculate attention mask for SW-MSA H, W = self.feat_size img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 cnt = 0 for h in ( slice(0, -self.window_size[0]), slice(-self.window_size[0], -self.shift_size[0]), slice(-self.shift_size[0], None)): for w in ( slice(0, -self.window_size[1]), slice(-self.window_size[1], -self.shift_size[1]), slice(-self.shift_size[1], None)): img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask, persistent=False) def init_weights(self): # extra, module specific weight init if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def update_input_size(self, new_window_size: Tuple[int, int], new_feat_size: Tuple[int, int]) -> None: """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. Args: new_window_size (int): New window size new_feat_size (Tuple[int, int]): New input resolution """ # Update input resolution self.feat_size: Tuple[int, int] = new_feat_size self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(new_window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.attn.update_input_size(new_window_size=self.window_size) self._make_attention_mask() def _shifted_window_attn(self, x): B, H, W, C = x.shape # cyclic shift sh, sw = self.shift_size do_shift: bool = any(self.shift_size) if do_shift: # FIXME PyTorch XLA needs cat impl, roll not lowered # x = torch.cat([x[:, sh:], x[:, :sh]], dim=1) # x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2) x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2)) # partition windows x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # num_windows * B, window_size * window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) x = window_reverse(attn_windows, self.window_size, self.feat_size) # B H' W' C # reverse cyclic shift if do_shift: # FIXME PyTorch XLA needs cat impl, roll not lowered # x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1) # x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2) x = torch.roll(x, shifts=(sh, sw), dims=(1, 2)) return x def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. 
Args: x (torch.Tensor): Input tensor of the shape [B, C, H, W] Returns: output (torch.Tensor): Output tensor of the shape [B, C, H, W] """ # post-norm branches (op -> norm -> drop) x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x))) B, H, W, C = x.shape x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): """ This class implements the patch merging as a strided convolution with a normalization before. Args: dim (int): Number of input channels norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. """ def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None: super(PatchMerging, self).__init__() self.norm = norm_layer(4 * dim) self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Forward pass. Args: x (torch.Tensor): Input tensor of the shape [B, C, H, W] Returns: output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] """ B, H, W, C = x.shape x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.norm(x) x = self.reduction(x) return x class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): B, C, H, W = x.shape _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") x = self.proj(x) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x class SwinTransformerV2CrStage(nn.Module): r"""This class implements a stage of the Swin transformer including multiple layers. Args: embed_dim (int): Number of input channels depth (int): Depth of the stage (number of layers) downscale (bool): If true input is downsampled (see Fig. 3 or V1 paper) feat_size (Tuple[int, int]): input feature map size (H, W) num_heads (int): Number of attention heads to be utilized window_size (int): Window size to be utilized mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels proj_drop (float): Dropout in input mapping drop_attn (float): Dropout rate of attention map drop_path (float): Dropout in main path norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. 
Default: nn.LayerNorm extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks extra_norm_stage (bool): End each stage with an extra norm layer in main branch sequential_attn (bool): If true sequential self-attention is performed """ def __init__( self, embed_dim: int, depth: int, downscale: bool, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], mlp_ratio: float = 4.0, init_values: Optional[float] = 0.0, proj_drop: float = 0.0, drop_attn: float = 0.0, drop_path: Union[List[float], float] = 0.0, norm_layer: Type[nn.Module] = nn.LayerNorm, extra_norm_period: int = 0, extra_norm_stage: bool = False, sequential_attn: bool = False, ) -> None: super(SwinTransformerV2CrStage, self).__init__() self.downscale: bool = downscale self.grad_checkpointing: bool = False self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size if downscale: self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer) embed_dim = embed_dim * 2 else: self.downsample = nn.Identity() def _extra_norm(index): i = index + 1 if extra_norm_period and i % extra_norm_period == 0: return True return i == depth if extra_norm_stage else False self.blocks = nn.Sequential(*[ SwinTransformerV2CrBlock( dim=embed_dim, num_heads=num_heads, feat_size=self.feat_size, window_size=window_size, shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]), mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop, drop_attn=drop_attn, drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path, extra_norm=_extra_norm(index), sequential_attn=sequential_attn, norm_layer=norm_layer, ) for index in range(depth)] ) def update_input_size(self, new_window_size: int, new_feat_size: Tuple[int, int]) -> None: """Method updates the resolution to utilize and the window size and so the pair-wise relative positions. Args: new_window_size (int): New window size new_feat_size (Tuple[int, int]): New input resolution """ self.feat_size: Tuple[int, int] = ( (new_feat_size[0] // 2, new_feat_size[1] // 2) if self.downscale else new_feat_size ) for block in self.blocks: block.update_input_size(new_window_size=new_window_size, new_feat_size=self.feat_size) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x (torch.Tensor): Input tensor of the shape [B, C, H, W] or [B, L, C] Returns: output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] """ x = bchw_to_bhwc(x) x = self.downsample(x) for block in self.blocks: # Perform checkpointing if utilized if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(block, x) else: x = block(x) x = bhwc_to_bchw(x) return x class SwinTransformerV2Cr(nn.Module): r""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/pdf/2111.09883 Args: img_size: Input resolution. window_size: Window size. If None, img_size // window_div img_window_ratio: Window size to image size ratio. patch_size: Patch size. in_chans: Number of input channels. depths: Depth of the stage (number of layers). num_heads: Number of attention heads to be utilized. embed_dim: Patch embedding dimension. num_classes: Number of output classes. mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels. drop_rate: Dropout rate. proj_drop_rate: Projection dropout rate. attn_drop_rate: Dropout rate of attention map. drop_path_rate: Stochastic depth rate. 
norm_layer: Type of normalization layer to be utilized. extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage extra_norm_stage: End each stage with an extra norm layer in main branch sequential_attn: If true sequential self-attention is performed. """ def __init__( self, img_size: Tuple[int, int] = (224, 224), patch_size: int = 4, window_size: Optional[int] = None, img_window_ratio: int = 32, in_chans: int = 3, num_classes: int = 1000, embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), num_heads: Tuple[int, ...] = (3, 6, 12, 24), mlp_ratio: float = 4.0, init_values: Optional[float] = 0., drop_rate: float = 0.0, proj_drop_rate: float = 0.0, attn_drop_rate: float = 0.0, drop_path_rate: float = 0.0, norm_layer: Type[nn.Module] = nn.LayerNorm, extra_norm_period: int = 0, extra_norm_stage: bool = False, sequential_attn: bool = False, global_pool: str = 'avg', weight_init='skip', **kwargs: Any ) -> None: super(SwinTransformerV2Cr, self).__init__() img_size = to_2tuple(img_size) window_size = tuple([ s // img_window_ratio for s in img_size]) if window_size is None else to_2tuple(window_size) self.num_classes: int = num_classes self.patch_size: int = patch_size self.img_size: Tuple[int, int] = img_size self.window_size: int = window_size self.num_features: int = int(embed_dim * 2 ** (len(depths) - 1)) self.feature_info = [] self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, ) patch_grid_size: Tuple[int, int] = self.patch_embed.grid_size dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] in_dim = embed_dim in_scale = 1 for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)): stages += [SwinTransformerV2CrStage( embed_dim=in_dim, depth=depth, downscale=stage_idx != 0, feat_size=( patch_grid_size[0] // in_scale, patch_grid_size[1] // in_scale ), num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop_rate, drop_attn=attn_drop_rate, drop_path=dpr[stage_idx], extra_norm_period=extra_norm_period, extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm sequential_attn=sequential_attn, norm_layer=norm_layer, )] if stage_idx != 0: in_dim *= 2 in_scale *= 2 self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')] self.stages = nn.Sequential(*stages) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) # current weight init skips custom init and uses pytorch layer defaults, seems to work well # FIXME more experiments needed if weight_init != 'skip': named_apply(init_weights, self) def update_input_size( self, new_img_size: Optional[Tuple[int, int]] = None, new_window_size: Optional[int] = None, img_window_ratio: int = 32, ) -> None: """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. 
Args: new_window_size (Optional[int]): New window size, if None based on new_img_size // window_div new_img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used img_window_ratio (int): divisor for calculating window size from image size """ # Check parameters if new_img_size is None: new_img_size = self.img_size else: new_img_size = to_2tuple(new_img_size) if new_window_size is None: new_window_size = tuple([s // img_window_ratio for s in new_img_size]) # Compute new patch resolution & update resolution of each stage new_patch_grid_size = (new_img_size[0] // self.patch_size, new_img_size[1] // self.patch_size) for index, stage in enumerate(self.stages): stage_scale = 2 ** max(index - 1, 0) stage.update_input_size( new_window_size=new_window_size, new_feat_size=(new_patch_grid_size[0] // stage_scale, new_patch_grid_size[1] // stage_scale), ) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^patch_embed', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore() def get_classifier(self) -> nn.Module: """Method returns the classification head of the model. Returns: head (nn.Module): Current classification head """ return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Method resets the classification head Args: num_classes (int): Number of classes to be predicted global_pool (str): Unused """ self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def init_weights(module: nn.Module, name: str = ''): # FIXME WIP determining if there's a better weight init if isinstance(module, nn.Linear): if 'qkv' in name: # treat the weights of Q, K, V separately val = math.sqrt(6.
/ float(module.weight.shape[0] // 3 + module.weight.shape[1])) nn.init.uniform_(module.weight, -val, val) elif 'head' in name: nn.init.zeros_(module.weight) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) if 'head.fc.weight' in state_dict: return state_dict out_dict = {} for k, v in state_dict.items(): if 'tau' in k: # convert old tau based checkpoints -> logit_scale (inverse) v = torch.log(1 / v) k = k.replace('tau', 'logit_scale') k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( SwinTransformerV2Cr, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs, } default_cfgs = generate_default_cfgs({ 'swinv2_cr_tiny_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_tiny_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_tiny_ns_224.sw_in1k': _cfg( hf_hub_id='timm/', url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_small_224.sw_in1k': _cfg( hf_hub_id='timm/', url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_ns_224.sw_in1k': _cfg( hf_hub_id='timm/', url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_ns_256.untrained': _cfg( url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)), 'swinv2_cr_base_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_base_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_base_ns_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_large_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_large_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_huge_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_huge_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_giant_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_giant_224.untrained': 
_cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), }) @register_model def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-T V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-T V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms. ** Experimental, may make default if results are improved. ** """ model_args = dict( embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True, ) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True, ) return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 256x256, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True, ) return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-B V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), ) return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-B V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), ) return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-B V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), extra_norm_stage=True, ) return 
_create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-L V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), ) return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-L V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), ) return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-H V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=352, depths=(2, 2, 18, 2), num_heads=(11, 22, 44, 88), # head count not certain for Huge, 384 & 224 trying diff values extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-H V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=352, depths=(2, 2, 18, 2), num_heads=(8, 16, 32, 64), # head count not certain for Huge, 384 & 224 trying diff values extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-G V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=512, depths=(2, 2, 42, 2), num_heads=(16, 32, 64, 128), extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-G V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=512, depths=(2, 2, 42, 2), num_heads=(16, 32, 64, 128), extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs))
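# A minimal usage sketch for the entrypoints registered above, assuming only a
# working PyTorch install; `pretrained=False` avoids any dependency on hosted
# weights. These functions are normally reached via
# `timm.create_model('swinv2_cr_tiny_ns_224', ...)`; the direct call below is
# equivalent. Input size is fixed to the configured 224x224 by PatchEmbed.
if __name__ == '__main__':
    model = swinv2_cr_tiny_ns_224(pretrained=False)
    model.eval()
    dummy = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(dummy)                      # classification logits, shape (1, 1000)
        features = model.forward_features(dummy)   # final stage feature map, NCHW layout
    print(logits.shape, features.shape)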
pytorch-image-models/timm/models/swin_transformer_v2_cr.py/0
{ "file_path": "pytorch-image-models/timm/models/swin_transformer_v2_cr.py", "repo_id": "pytorch-image-models", "token_count": 18939 }
187
from .adabelief import AdaBelief
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .adamw import AdamW
from .adan import Adan
from .lamb import Lamb
from .lars import Lars
from .lookahead import Lookahead
from .madgrad import MADGRAD
from .nadam import Nadam
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from .lion import Lion

from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
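# A short, hedged usage sketch for the factory exported above. The exact set of
# supported `opt` strings is defined in `optim_factory`; 'adamw' is assumed to be
# among them here, and the hyper-parameters are illustrative only.
if __name__ == '__main__':
    import torch.nn as nn

    model = nn.Linear(10, 2)
    optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
    print(type(optimizer).__name__)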
pytorch-image-models/timm/optim/__init__.py/0
{ "file_path": "pytorch-image-models/timm/optim/__init__.py", "repo_id": "pytorch-image-models", "token_count": 170 }
188
"""RAdam Optimizer. Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 """ import math import torch from torch.optim.optimizer import Optimizer class RAdam(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)]) super(RAdam, self).__init__(params, defaults) def __setstate__(self, state): super(RAdam, self).__setstate__(state) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_fp32 = p.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_fp32) state['exp_avg_sq'] = torch.zeros_like(p_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) state['step'] += 1 buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: num_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma # more conservative since it's an approximated value if num_sma >= 5: step_size = group['lr'] * math.sqrt( (1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = group['lr'] / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) # more conservative since it's an approximated value if num_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: p_fp32.add_(exp_avg, alpha=-step_size) p.copy_(p_fp32) return loss
pytorch-image-models/timm/optim/radam.py/0
{ "file_path": "pytorch-image-models/timm/optim/radam.py", "repo_id": "pytorch-image-models", "token_count": 1967 }
189
import torch

from timm.utils.agc import adaptive_clip_grad


def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0):
    """ Dispatch to gradient clipping method

    Args:
        parameters (Iterable): model parameters to clip
        value (float): clipping value/factor/norm, mode dependent
        mode (str): clipping mode, one of 'norm', 'value', 'agc'
        norm_type (float): p-norm, default 2.0
    """
    if mode == 'norm':
        torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type)
    elif mode == 'value':
        torch.nn.utils.clip_grad_value_(parameters, value)
    elif mode == 'agc':
        adaptive_clip_grad(parameters, value, norm_type=norm_type)
    else:
        assert False, f"Unknown clip mode ({mode})."
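# A minimal training-step sketch showing where dispatch_clip_grad fits; the
# model, data, and clipping value are illustrative only.
if __name__ == '__main__':
    import torch.nn as nn
    import torch.nn.functional as F

    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    inputs, targets = torch.randn(16, 4), torch.randint(0, 2, (16,))
    loss = F.cross_entropy(model(inputs), targets)

    optimizer.zero_grad()
    loss.backward()
    # Clip by global L2 norm; mode='value' or mode='agc' select the other strategies above.
    dispatch_clip_grad(model.parameters(), value=1.0, mode='norm')
    optimizer.step()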
pytorch-image-models/timm/utils/clip_grad.py/0
{ "file_path": "pytorch-image-models/timm/utils/clip_grad.py", "repo_id": "pytorch-image-models", "token_count": 306 }
190
aml
target
server/transformers
server/flash-attention
text-generation-inference/.dockerignore/0
{ "file_path": "text-generation-inference/.dockerignore", "repo_id": "text-generation-inference", "token_count": 16 }
191
import pytest

from text_generation import __version__
from huggingface_hub.utils import build_hf_headers


@pytest.fixture
def flan_t5_xxl():
    return "google/flan-t5-xxl"


@pytest.fixture
def fake_model():
    return "fake/model"


@pytest.fixture
def unsupported_model():
    return "gpt2"


@pytest.fixture
def base_url():
    return "https://api-inference.huggingface.co/models"


@pytest.fixture
def bloom_url(base_url, bloom_model):
    return f"{base_url}/{bloom_model}"


@pytest.fixture
def flan_t5_xxl_url(base_url, flan_t5_xxl):
    return f"{base_url}/{flan_t5_xxl}"


@pytest.fixture
def fake_url(base_url, fake_model):
    return f"{base_url}/{fake_model}"


@pytest.fixture
def unsupported_url(base_url, unsupported_model):
    return f"{base_url}/{unsupported_model}"


@pytest.fixture(scope="session")
def hf_headers():
    return build_hf_headers(
        library_name="text-generation-tests", library_version=__version__
    )
text-generation-inference/clients/python/tests/conftest.py/0
{ "file_path": "text-generation-inference/clients/python/tests/conftest.py", "repo_id": "text-generation-inference", "token_count": 390 }
192
# Non-core Model Serving

TGI supports various LLM architectures (see full list [here](../supported_models)). If you wish to serve a model that is not one of the supported models, TGI will fall back to the `transformers` implementation of that model. This means you will be unable to use some of the features introduced by TGI, such as tensor-parallel sharding or flash attention. However, you can still get many benefits of TGI, such as continuous batching or streaming outputs.

You can serve these models using the same Docker command-line invocation as with fully supported models 👇

```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id gpt2
```

If the model you wish to serve is a custom transformers model, and its weights and implementation are available in the Hub, you can still serve the model by passing the `--trust-remote-code` flag to the `docker run` command like below 👇

```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id <CUSTOM_MODEL_ID> --trust-remote-code
```

Finally, if the model is not on Hugging Face Hub but on your local disk, you can pass the path to the folder that contains your model like below 👇

```bash
# Make sure your model is in the $volume directory
docker run --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/<PATH-TO-FOLDER>
```

You can refer to [transformers docs on custom models](https://huggingface.co/docs/transformers/main/en/custom_models) for more information.
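Once the model is running, it can be queried from Python as well as over raw HTTP. Below is a minimal sketch using the `text_generation` client shipped in this repository; it assumes the client is installed (`pip install text-generation`) and that the server started with one of the commands above is listening on `localhost:8080`.

```python
from text_generation import Client

# Point the client at the locally served (non-core) model.
client = Client("http://127.0.0.1:8080")

# Plain generation.
response = client.generate("def print_hello():", max_new_tokens=20)
print(response.generated_text)

# Streaming also works for models served through the transformers fallback.
text = ""
for stream_response in client.generate_stream("def print_hello():", max_new_tokens=20):
    if not stream_response.token.special:
        text += stream_response.token.text
print(text)
```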
text-generation-inference/docs/source/basic_tutorials/non_core_models.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/non_core_models.md", "repo_id": "text-generation-inference", "token_count": 475 }
193
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 15, "logprob": null, "text": "," }, { "id": 1669, "logprob": -5.4414062, "text": " il" }, { "id": 11580, "logprob": -2.3378906, "text": " faut" }, { "id": 3913, "logprob": -4.3554688, "text": " tout" }, { "id": 39261, "logprob": -2.9238281, "text": " d'abord" } ], "seed": 0, "tokens": [ { "id": 408, "logprob": -0.07891846, "special": false, "text": " que" }, { "id": 366, "logprob": -1.2939453, "special": false, "text": " la" }, { "id": 8769, "logprob": -0.3708496, "special": false, "text": " personne" }, { "id": 1479, "logprob": -2.2871094, "special": false, "text": " qui" }, { "id": 2997, "logprob": -0.8671875, "special": false, "text": " vous" }, { "id": 35977, "logprob": -1.5097656, "special": false, "text": " suit" }, { "id": 21558, "logprob": -0.07891846, "special": false, "text": " ait" }, { "id": 447, "logprob": -0.12695312, "special": false, "text": " un" }, { "id": 78606, "logprob": -2.21875, "special": false, "text": " profil" }, { "id": 3899, "logprob": -1.3535156, "special": false, "text": " bien" } ] }, "generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui vous suit ait un profil bien" }
text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json", "repo_id": "text-generation-inference", "token_count": 1199 }
194
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -9.6015625, "text": "Test" }, { "id": 2009, "logprob": -9.6640625, "text": "request" } ], "seed": 0, "tokens": [ { "id": 29899, "logprob": -1.1640625, "special": false, "text": "-" }, { "id": 1454, "logprob": -0.07543945, "special": false, "text": "for" }, { "id": 29899, "logprob": 0.0, "special": false, "text": "-" }, { "id": 9342, "logprob": 0.0, "special": false, "text": "comment" }, { "id": 29901, "logprob": 0.0, "special": false, "text": ":" }, { "id": 396, "logprob": -0.2956543, "special": false, "text": " #" }, { "id": 29906, "logprob": -0.52734375, "special": false, "text": "2" }, { "id": 29900, "logprob": -0.6899414, "special": false, "text": "0" }, { "id": 29896, "logprob": 0.0, "special": false, "text": "1" }, { "id": 29946, "logprob": -1.5068359, "special": false, "text": "4" } ] }, "generated_text": "Test request-for-comment: #2014" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json", "repo_id": "text-generation-inference", "token_count": 1024 }
195
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": 
null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json", "repo_id": "text-generation-inference", "token_count": 5188 }
196
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5449219, "special": false, "text": " word" }, { "id": 346, "logprob": -0.05038452, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002292633, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3828278e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010242462, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.090270996, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12719727, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016571045, "special": false, "text": " used" }, { "id": 275, "logprob": -0.43432617, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json", "repo_id": "text-generation-inference", "token_count": 1966 }
197
import pytest


@pytest.fixture(scope="module")
def flash_santacoder_handle(launcher):
    with launcher("bigcode/santacoder") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_santacoder(flash_santacoder_handle):
    await flash_santacoder_handle.health(300)
    return flash_santacoder_handle.client


@pytest.mark.asyncio
async def test_flash_santacoder(flash_santacoder, response_snapshot):
    response = await flash_santacoder.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_santacoder_load(
    flash_santacoder, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_santacoder, "def print_hello", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_santacoder.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_santacoder.py", "repo_id": "text-generation-inference", "token_count": 387 }
198
use clap::{Parser, ValueEnum}; use nix::sys::signal::{self, Signal}; use nix::unistd::Pid; use serde::Deserialize; use std::env; use std::ffi::OsString; use std::io::{BufRead, BufReader, Lines}; use std::os::unix::process::{CommandExt, ExitStatusExt}; use std::path::Path; use std::process::{Child, Command, ExitStatus, Stdio}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::TryRecvError; use std::sync::{mpsc, Arc}; use std::thread; use std::thread::sleep; use std::time::{Duration, Instant}; use std::{fs, io}; use tracing_subscriber::EnvFilter; mod env_runtime; #[derive(Clone, Copy, Debug, ValueEnum)] enum Quantization { /// 4 bit quantization. Requires a specific AWQ quantized model: /// https://hf.co/models?search=awq. /// Should replace GPTQ models wherever possible because of the better latency Awq, /// 8 bit quantization, doesn't require specific model. /// Should be a drop-in replacement to bitsandbytes with much better performance. /// Kernels are from https://github.com/NetEase-FuXi/EETQ.git Eetq, /// 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. /// text-generation-inference will use exllama (faster) kernels wherever possible, and use /// triton kernel (wider support) when it's not. /// AWQ has faster kernels. Gptq, /// Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, /// but it is known that the model will be much slower to run than the native f16. #[deprecated( since = "1.1.0", note = "Use `eetq` instead, which provides better latencies overall and is drop-in in most cases" )] Bitsandbytes, /// Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, /// but it is known that the model will be much slower to run than the native f16. BitsandbytesNF4, /// Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better /// perplexity performance for you model BitsandbytesFP4, } impl std::fmt::Display for Quantization { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in track with `server`. match self { #[allow(deprecated)] // Use `eetq` instead, which provides better latencies overall and is drop-in in most cases Quantization::Bitsandbytes => { write!(f, "bitsandbytes") } Quantization::BitsandbytesNF4 => { write!(f, "bitsandbytes-nf4") } Quantization::BitsandbytesFP4 => { write!(f, "bitsandbytes-fp4") } Quantization::Gptq => { write!(f, "gptq") } Quantization::Awq => { write!(f, "awq") } Quantization::Eetq => { write!(f, "eetq") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] enum Dtype { Float16, #[clap(name = "bfloat16")] BFloat16, } impl std::fmt::Display for Dtype { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in track with `server`. match self { Dtype::Float16 => { write!(f, "float16") } Dtype::BFloat16 => { write!(f, "bfloat16") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] enum RopeScaling { Linear, Dynamic, } impl std::fmt::Display for RopeScaling { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in track with `server`. match self { RopeScaling::Linear => { write!(f, "linear") } RopeScaling::Dynamic => { write!(f, "dynamic") } } } } /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the model to load. /// Can be a MODEL_ID as listed on <https://hf.co/models> like /// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. 
/// Or it can be a local directory containing the necessary files /// as saved by `save_pretrained(...)` methods of transformers #[clap(default_value = "bigscience/bloom-560m", long, env)] model_id: String, /// The actual revision of the model if you're referring to a model /// on the hub. You can use a specific commit id or a branch like `refs/pr/2`. #[clap(long, env)] revision: Option<String>, /// The number of tokenizer workers used for payload validation and truncation inside the /// router. #[clap(default_value = "2", long, env)] validation_workers: usize, /// Whether to shard the model across multiple GPUs /// By default text-generation-inference will use all available GPUs to run /// the model. Setting it to `false` deactivates `num_shard`. #[clap(long, env)] sharded: Option<bool>, /// The number of shards to use if you don't want to use all GPUs on a given machine. /// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` /// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to /// launch 2 copies with 2 shard each on a given machine with 4 GPUs for instance. #[clap(long, env)] num_shard: Option<usize>, /// Whether you want the model to be quantized. #[clap(long, env, value_enum)] quantize: Option<Quantization>, /// The number of input_ids to speculate on /// If using a medusa model, the heads will be picked up automatically /// Other wise, it will use n-gram speculation which is relatively free /// in terms of compute, but the speedup heavily depends on the task. #[clap(long, env)] speculate: Option<usize>, /// The dtype to be forced upon the model. This option cannot be used with `--quantize`. #[clap(long, env, value_enum)] dtype: Option<Dtype>, /// Whether you want to execute hub modelling code. Explicitly passing a `revision` is /// encouraged when loading a model with custom code to ensure no malicious code has been /// contributed in a newer revision. #[clap(long, env, value_enum)] trust_remote_code: bool, /// The maximum amount of concurrent requests for this particular deployment. /// Having a low limit will refuse clients requests instead of having them /// wait for too long and is usually good to handle backpressure correctly. #[clap(default_value = "128", long, env)] max_concurrent_requests: usize, /// This is the maximum allowed value for clients to set `best_of`. /// Best of makes `n` generations at the same time, and return the best /// in terms of overall log probability over the entire generated sequence #[clap(default_value = "2", long, env)] max_best_of: usize, /// This is the maximum allowed value for clients to set `stop_sequences`. /// Stop sequences are used to allow the model to stop on more than just /// the EOS token, and enable more complex "prompting" where users can preprompt /// the model in a specific way and define their "own" stop token aligned with /// their prompt. #[clap(default_value = "4", long, env)] max_stop_sequences: usize, /// This is the maximum allowed value for clients to set `top_n_tokens`. /// `top_n_tokens is used to return information about the the `n` most likely /// tokens at each generation step, instead of just the sampled token. This /// information can be used for downstream tasks like for classification or /// ranking. #[clap(default_value = "5", long, env)] max_top_n_tokens: u32, /// This is the maximum allowed input length (expressed in number of tokens) /// for users. 
The larger this value, the longer prompt users can send which /// can impact the overall memory required to handle the load. /// Please note that some models have a finite range of sequence they can handle. #[clap(default_value = "1024", long, env)] max_input_length: usize, /// This is the most important value to set as it defines the "memory budget" /// of running clients requests. /// Clients will send input sequences and ask to generate `max_new_tokens` /// on top. with a value of `1512` users can send either a prompt of /// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for /// `1511` max_new_tokens. /// The larger this value, the larger amount each request will be in your RAM /// and the less effective batching can be. #[clap(default_value = "2048", long, env)] max_total_tokens: usize, /// This represents the ratio of waiting queries vs running queries where /// you want to start considering pausing the running queries to include the waiting /// ones into the same batch. /// `waiting_served_ratio=1.2` Means when 12 queries are waiting and there's /// only 10 queries left in the current batch we check if we can fit those 12 /// waiting queries into the batching strategy, and if yes, then batching happens /// delaying the 10 running queries by a `prefill` run. /// /// This setting is only applied if there is room in the batch /// as defined by `max_batch_total_tokens`. #[clap(default_value = "1.2", long, env)] waiting_served_ratio: f32, /// Limits the number of tokens for the prefill operation. /// Since this operation take the most memory and is compute bound, it is interesting /// to limit the number of requests that can be sent. #[clap(default_value = "4096", long, env)] max_batch_prefill_tokens: u32, /// **IMPORTANT** This is one critical control to allow maximum usage /// of the available hardware. /// /// This represents the total amount of potential tokens within a batch. /// When using padding (not recommended) this would be equivalent of /// `batch_size` * `max_total_tokens`. /// /// However in the non-padded (flash attention) version this can be much finer. /// /// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` /// or a single query of `1000` tokens. /// /// Overall this number should be the largest possible amount that fits the /// remaining memory (after the model is loaded). Since the actual memory overhead /// depends on other parameters like if you're using quantization, flash attention /// or the model implementation, text-generation-inference cannot infer this number /// automatically. #[clap(long, env)] max_batch_total_tokens: Option<u32>, /// This setting defines how many tokens can be passed before forcing the waiting /// queries to be put on the batch (if the size of the batch allows for it). /// New queries require 1 `prefill` forward, which is different from `decode` /// and therefore you need to pause the running batch in order to run `prefill` /// to create the correct values for the waiting queries to be able to join the batch. /// /// With a value too small, queries will always "steal" the compute to run `prefill` /// and running queries will be delayed by a lot. /// /// With a value too big, waiting queries could wait for a very long time /// before being allowed a slot in the running batch. If your server is busy /// that means that requests that could run in ~2s on an empty server could /// end up running in ~20s because the query had to wait for 18s. 
/// /// This number is expressed in number of tokens to make it a bit more /// "model" agnostic, but what should really matter is the overall latency /// for end users. #[clap(default_value = "20", long, env)] max_waiting_tokens: usize, /// The IP address to listen on #[clap(default_value = "0.0.0.0", long, env)] hostname: String, /// The port to listen on. #[clap(default_value = "3000", long, short, env)] port: u16, /// The name of the socket for gRPC communication between the webserver /// and the shards. #[clap(default_value = "/tmp/text-generation-server", long, env)] shard_uds_path: String, /// The address the master shard will listen on. (setting used by torch distributed) #[clap(default_value = "localhost", long, env)] master_addr: String, /// The address the master port will listen on. (setting used by torch distributed) #[clap(default_value = "29500", long, env)] master_port: usize, /// The location of the huggingface hub cache. /// Used to override the location if you want to provide a mounted disk for instance #[clap(long, env)] huggingface_hub_cache: Option<String>, /// The location of the huggingface hub cache. /// Used to override the location if you want to provide a mounted disk for instance #[clap(long, env)] weights_cache_override: Option<String>, /// For some models (like bloom), text-generation-inference implemented custom /// cuda kernels to speed up inference. Those kernels were only tested on A100. /// Use this flag to disable them if you're running on different hardware and /// encounter issues. #[clap(long, env)] disable_custom_kernels: bool, /// Limit the CUDA available memory. /// The allowed value equals the total visible memory multiplied by cuda-memory-fraction. #[clap(default_value = "1.0", long, env)] cuda_memory_fraction: f32, /// Rope scaling will only be used for RoPE models /// and allow rescaling the position rotary to accomodate for /// larger prompts. /// /// Goes together with `rope_factor`. /// /// `--rope-factor 2.0` gives linear scaling with a factor of 2.0 /// `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0 /// `--rope-scaling linear` gives linear scaling with a factor of 1.0 (Nothing will be changed /// basically) /// /// `--rope-scaling linear --rope-factor` fully describes the scaling you want #[clap(long, env)] rope_scaling: Option<RopeScaling>, /// Rope scaling will only be used for RoPE models /// See `rope_scaling` #[clap(long, env)] rope_factor: Option<f32>, /// Outputs the logs in JSON format (useful for telemetry) #[clap(long, env)] json_output: bool, #[clap(long, env)] otlp_endpoint: Option<String>, #[clap(long, env)] cors_allow_origin: Vec<String>, #[clap(long, env)] watermark_gamma: Option<f32>, #[clap(long, env)] watermark_delta: Option<f32>, /// Enable ngrok tunneling #[clap(long, env)] ngrok: bool, /// ngrok authentication token #[clap(long, env)] ngrok_authtoken: Option<String>, /// ngrok edge #[clap(long, env)] ngrok_edge: Option<String>, /// The path to the tokenizer config file. This path is used to load the tokenizer configuration which may /// include a `chat_template`. If not provided, the default config will be used from the model hub. 
#[clap(long, env)] tokenizer_config_path: Option<String>, /// Display a lot of information about your runtime environment #[clap(long, short, action)] env: bool, } #[derive(Debug)] enum ShardStatus { Ready, Failed(usize), } #[allow(clippy::too_many_arguments)] fn shard_manager( model_id: String, revision: Option<String>, quantize: Option<Quantization>, speculate: Option<usize>, dtype: Option<Dtype>, trust_remote_code: bool, uds_path: String, rank: usize, world_size: usize, master_addr: String, master_port: usize, huggingface_hub_cache: Option<String>, weights_cache_override: Option<String>, disable_custom_kernels: bool, watermark_gamma: Option<f32>, watermark_delta: Option<f32>, cuda_memory_fraction: f32, rope_scaling: Option<RopeScaling>, rope_factor: Option<f32>, otlp_endpoint: Option<String>, status_sender: mpsc::Sender<ShardStatus>, shutdown: Arc<AtomicBool>, _shutdown_sender: mpsc::Sender<()>, ) { // Enter shard-manager tracing span let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered(); // Get UDS path let uds_string = format!("{uds_path}-{rank}"); let uds = Path::new(&uds_string); // Clean previous runs if uds.exists() { fs::remove_file(uds).unwrap(); } // Process args let mut shard_args = vec![ "serve".to_string(), model_id, "--uds-path".to_string(), uds_path, "--logger-level".to_string(), "INFO".to_string(), "--json-output".to_string(), ]; // Activate trust remote code if trust_remote_code { shard_args.push("--trust-remote-code".to_string()); } // Activate tensor parallelism if world_size > 1 { shard_args.push("--sharded".to_string()); } if let Some(quantize) = quantize { shard_args.push("--quantize".to_string()); shard_args.push(quantize.to_string()) } if let Some(speculate) = speculate { shard_args.push("--speculate".to_string()); shard_args.push(speculate.to_string()) } if let Some(dtype) = dtype { shard_args.push("--dtype".to_string()); shard_args.push(dtype.to_string()) } // Model optional revision if let Some(revision) = revision { shard_args.push("--revision".to_string()); shard_args.push(revision) } let rope = match (rope_scaling, rope_factor) { (None, None) => None, (Some(scaling), None) => Some((scaling, 1.0)), (Some(scaling), Some(factor)) => Some((scaling, factor)), (None, Some(factor)) => Some((RopeScaling::Linear, factor)), }; // OpenTelemetry if let Some(otlp_endpoint) = otlp_endpoint { shard_args.push("--otlp-endpoint".to_string()); shard_args.push(otlp_endpoint); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Torch Distributed Env vars envs.push(("RANK".into(), rank.to_string().into())); envs.push(("WORLD_SIZE".into(), world_size.to_string().into())); envs.push(("MASTER_ADDR".into(), master_addr.into())); envs.push(("MASTER_PORT".into(), master_port.to_string().into())); envs.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into())); // CUDA memory fraction envs.push(( "CUDA_MEMORY_FRACTION".into(), cuda_memory_fraction.to_string().into(), )); // Safetensors load fast envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into())); // Disable progress bar envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into())); // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( "HF_HUB_ENABLE_HF_TRANSFER".into(), enable_hf_transfer.into(), )); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; // Detect rope 
scaling // Sending as env instead of CLI args to not bloat everything // those only can be used by RoPE models, so passing information around // for all models will complexify code unnecessarily if let Some((scaling, factor)) = rope { envs.push(("ROPE_SCALING".into(), scaling.to_string().into())); envs.push(("ROPE_FACTOR".into(), factor.to_string().into())); } // If huggingface_hub_cache is some, pass it to the shard // Useful when running inside a docker container if let Some(huggingface_hub_cache) = huggingface_hub_cache { envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into())); }; // If weights_cache_override is some, pass it to the shard // Useful when running inside a HuggingFace Inference Endpoint if let Some(weights_cache_override) = weights_cache_override { envs.push(( "WEIGHTS_CACHE_OVERRIDE".into(), weights_cache_override.into(), )); }; // If disable_custom_kernels is true, pass it to the shard as an env var if disable_custom_kernels { envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into())) } // Watermark Gamma if let Some(watermark_gamma) = watermark_gamma { envs.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into())) } // Watermark Delta if let Some(watermark_delta) = watermark_delta { envs.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into())) } // Start process tracing::info!("Starting shard"); let mut p = match Command::new("text-generation-server") .args(shard_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-server not found in PATH"); tracing::error!("Please install it with `make install-server`") } { tracing::error!("{}", err); } status_sender.send(ShardStatus::Failed(rank)).unwrap(); return; } }; // Redirect STDOUT to the console let shard_stdout_reader = BufReader::new(p.stdout.take().unwrap()); let shard_stderr_reader = BufReader::new(p.stderr.take().unwrap()); //stdout tracing thread thread::spawn(move || { log_lines(shard_stdout_reader.lines()); }); // We read stderr in another thread as it seems that lines() can block in some cases let (err_sender, err_receiver) = mpsc::channel(); thread::spawn(move || { for line in shard_stderr_reader.lines().flatten() { err_sender.send(line).unwrap_or(()); } }); let mut ready = false; let start_time = Instant::now(); let mut wait_time = Instant::now(); loop { // Process exited if let Some(exit_status) = p.try_wait().unwrap() { let mut err = String::new(); while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { err = err + "\n" + &line; } tracing::error!("Shard complete standard error output:\n{err}"); if let Some(signal) = exit_status.signal() { tracing::error!("Shard process was signaled to shutdown with signal {signal}"); } status_sender.send(ShardStatus::Failed(rank)).unwrap(); return; } // We received a shutdown signal if shutdown.load(Ordering::SeqCst) { p.kill().unwrap(); let _ = p.wait(); tracing::info!("Shard terminated"); return; } // Shard is ready if uds.exists() && !ready { tracing::info!("Shard ready in {:?}", start_time.elapsed()); status_sender.send(ShardStatus::Ready).unwrap(); ready = true; } else if !ready && wait_time.elapsed() > Duration::from_secs(10) { tracing::info!("Waiting for shard to be ready..."); wait_time = Instant::now(); } sleep(Duration::from_millis(100)); } } fn shutdown_shards(shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>) { tracing::info!("Shutting down shards"); 
// Update shutdown value to true // This will be picked up by the shard manager shutdown.store(true, Ordering::SeqCst); // Wait for shards to shutdown // This will block till all shutdown_sender are dropped let _ = shutdown_receiver.recv(); } fn num_cuda_devices() -> Option<usize> { let devices = match env::var("CUDA_VISIBLE_DEVICES") { Ok(devices) => devices, Err(_) => env::var("NVIDIA_VISIBLE_DEVICES").ok()?, }; let n_devices = devices.split(',').count(); Some(n_devices) } #[derive(Deserialize)] #[serde(rename_all = "UPPERCASE")] enum PythonLogLevelEnum { Trace, Debug, Info, Success, Warning, Error, Critical, } #[derive(Deserialize)] struct PythonLogLevel { name: PythonLogLevelEnum, } #[derive(Deserialize)] struct PythonLogRecord { level: PythonLogLevel, } #[derive(Deserialize)] struct PythonLogMessage { text: String, record: PythonLogRecord, } impl PythonLogMessage { fn trace(&self) { match self.record.level.name { PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text), PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text), PythonLogLevelEnum::Info => tracing::info!("{}", self.text), PythonLogLevelEnum::Success => tracing::info!("{}", self.text), PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text), PythonLogLevelEnum::Error => tracing::error!("{}", self.text), PythonLogLevelEnum::Critical => tracing::error!("{}", self.text), } } } impl TryFrom<&String> for PythonLogMessage { type Error = serde_json::Error; fn try_from(value: &String) -> Result<Self, Self::Error> { serde_json::from_str::<Self>(value) } } fn log_lines<S: Sized + BufRead>(lines: Lines<S>) { for line in lines.flatten() { match PythonLogMessage::try_from(&line) { Ok(log) => log.trace(), Err(_) => tracing::debug!("{line}"), } } } fn find_num_shards( sharded: Option<bool>, num_shard: Option<usize>, ) -> Result<usize, LauncherError> { // get the number of shards given `sharded` and `num_shard` let num_shard = match (sharded, num_shard) { (Some(true), None) => { // try to default to the number of available GPUs tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES"); let n_devices = num_cuda_devices() .expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES are not set"); if n_devices <= 1 { return Err(LauncherError::NotEnoughCUDADevices(format!( "`sharded` is true but only found {n_devices} CUDA devices" ))); } n_devices } (Some(true), Some(num_shard)) => { // we can't have only one shard while sharded if num_shard <= 1 { return Err(LauncherError::ArgumentValidation( "`sharded` is true but `num_shard` <= 1".to_string(), )); } num_shard } (Some(false), Some(num_shard)) => num_shard, (Some(false), None) => 1, (None, None) => num_cuda_devices().unwrap_or(1), (None, Some(num_shard)) => num_shard, }; if num_shard < 1 { return Err(LauncherError::ArgumentValidation( "`num_shard` cannot be < 1".to_string(), )); } Ok(num_shard) } #[derive(Debug)] enum LauncherError { ArgumentValidation(String), NotEnoughCUDADevices(String), DownloadError, ShardCannotStart, ShardDisconnected, ShardFailed, WebserverFailed, WebserverCannotStart, } fn download_convert_model(args: &Args, running: Arc<AtomicBool>) -> Result<(), LauncherError> { // Enter download tracing span let _span = tracing::span!(tracing::Level::INFO, "download").entered(); let mut download_args = vec![ "download-weights".to_string(), args.model_id.to_string(), "--extension".to_string(), ".safetensors".to_string(), "--logger-level".to_string(), "INFO".to_string(), "--json-output".to_string(), ]; // Model optional revision 
if let Some(revision) = &args.revision { download_args.push("--revision".to_string()); download_args.push(revision.to_string()) } // Trust remote code for automatic peft fusion if args.trust_remote_code { download_args.push("--trust-remote-code".to_string()); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Disable progress bar envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into())); // If huggingface_hub_cache is set, pass it to the download process // Useful when running inside a docker container if let Some(ref huggingface_hub_cache) = args.huggingface_hub_cache { envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into())); }; // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( "HF_HUB_ENABLE_HF_TRANSFER".into(), enable_hf_transfer.into(), )); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; // If args.weights_cache_override is some, pass it to the download process // Useful when running inside a HuggingFace Inference Endpoint if let Some(weights_cache_override) = &args.weights_cache_override { envs.push(( "WEIGHTS_CACHE_OVERRIDE".into(), weights_cache_override.into(), )); }; // Start process tracing::info!("Starting download process."); let mut download_process = match Command::new("text-generation-server") .args(download_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-server not found in PATH"); tracing::error!("Please install it with `make install-server`") } else { tracing::error!("{}", err); } return Err(LauncherError::DownloadError); } }; let download_stdout = BufReader::new(download_process.stdout.take().unwrap()); thread::spawn(move || { log_lines(download_stdout.lines()); }); let download_stderr = BufReader::new(download_process.stderr.take().unwrap()); // We read stderr in another thread as it seems that lines() can block in some cases let (err_sender, err_receiver) = mpsc::channel(); thread::spawn(move || { for line in download_stderr.lines().flatten() { err_sender.send(line).unwrap_or(()); } }); loop { if let Some(status) = download_process.try_wait().unwrap() { if status.success() { tracing::info!("Successfully downloaded weights."); break; } let mut err = String::new(); while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { err = err + "\n" + &line; } if let Some(signal) = status.signal() { tracing::error!( "Download process was signaled to shutdown with signal {signal}: {err}" ); } else { tracing::error!("Download encountered an error: {err}"); } return Err(LauncherError::DownloadError); } if !running.load(Ordering::SeqCst) { terminate("download", download_process, Duration::from_secs(10)).unwrap(); return Ok(()); } sleep(Duration::from_millis(100)); } Ok(()) } #[allow(clippy::too_many_arguments)] fn spawn_shards( num_shard: usize, args: &Args, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, shutdown_sender: mpsc::Sender<()>, status_receiver: &mpsc::Receiver<ShardStatus>, status_sender: mpsc::Sender<ShardStatus>, running: Arc<AtomicBool>, ) -> Result<(), LauncherError> { // Start shard processes for rank in 0..num_shard { let model_id = args.model_id.clone(); let revision = args.revision.clone(); let uds_path = 
args.shard_uds_path.clone(); let master_addr = args.master_addr.clone(); let huggingface_hub_cache = args.huggingface_hub_cache.clone(); let weights_cache_override = args.weights_cache_override.clone(); let status_sender = status_sender.clone(); let shutdown = shutdown.clone(); let shutdown_sender = shutdown_sender.clone(); let otlp_endpoint = args.otlp_endpoint.clone(); let quantize = args.quantize; let speculate = args.speculate; let dtype = args.dtype; let trust_remote_code = args.trust_remote_code; let master_port = args.master_port; let disable_custom_kernels = args.disable_custom_kernels; let watermark_gamma = args.watermark_gamma; let watermark_delta = args.watermark_delta; let cuda_memory_fraction = args.cuda_memory_fraction; let rope_scaling = args.rope_scaling; let rope_factor = args.rope_factor; thread::spawn(move || { shard_manager( model_id, revision, quantize, speculate, dtype, trust_remote_code, uds_path, rank, num_shard, master_addr, master_port, huggingface_hub_cache, weights_cache_override, disable_custom_kernels, watermark_gamma, watermark_delta, cuda_memory_fraction, rope_scaling, rope_factor, otlp_endpoint, status_sender, shutdown, shutdown_sender, ) }); } drop(shutdown_sender); // Wait for shard to start let mut shard_ready = 0; while running.load(Ordering::SeqCst) { match status_receiver.try_recv() { Ok(ShardStatus::Ready) => { shard_ready += 1; if shard_ready == num_shard { break; } } Err(TryRecvError::Empty) => { sleep(Duration::from_millis(100)); } Ok(ShardStatus::Failed(rank)) => { tracing::error!("Shard {rank} failed to start"); shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::ShardCannotStart); } Err(TryRecvError::Disconnected) => { tracing::error!("Shard status channel disconnected"); shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::ShardDisconnected); } } } Ok(()) } fn compute_type(num_shard: usize) -> Option<String> { let output = Command::new("nvidia-smi") .args(["--query-gpu=gpu_name", "--format=csv"]) .output() .ok()?; let output = String::from_utf8(output.stdout).ok()?; let fullname = output.split('\n').nth(1)?; let cardname = fullname.replace(' ', "-").to_lowercase(); let compute_type = format!("{num_shard}-{cardname}"); Some(compute_type) } fn spawn_webserver( num_shard: usize, args: Args, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, ) -> Result<Child, LauncherError> { // All shard started // Start webserver tracing::info!("Starting Webserver"); let mut router_args = vec![ "--max-concurrent-requests".to_string(), args.max_concurrent_requests.to_string(), "--max-best-of".to_string(), args.max_best_of.to_string(), "--max-stop-sequences".to_string(), args.max_stop_sequences.to_string(), "--max-top-n-tokens".to_string(), args.max_top_n_tokens.to_string(), "--max-input-length".to_string(), args.max_input_length.to_string(), "--max-total-tokens".to_string(), args.max_total_tokens.to_string(), "--max-batch-prefill-tokens".to_string(), args.max_batch_prefill_tokens.to_string(), "--waiting-served-ratio".to_string(), args.waiting_served_ratio.to_string(), "--max-waiting-tokens".to_string(), args.max_waiting_tokens.to_string(), "--validation-workers".to_string(), args.validation_workers.to_string(), "--hostname".to_string(), args.hostname.to_string(), "--port".to_string(), args.port.to_string(), "--master-shard-uds-path".to_string(), format!("{}-0", args.shard_uds_path), "--tokenizer-name".to_string(), args.model_id, ]; // Tokenizer config path if let Some(ref tokenizer_config_path) = 
args.tokenizer_config_path { router_args.push("--tokenizer-config-path".to_string()); router_args.push(tokenizer_config_path.to_string()); } // Model optional max batch total tokens if let Some(max_batch_total_tokens) = args.max_batch_total_tokens { router_args.push("--max-batch-total-tokens".to_string()); router_args.push(max_batch_total_tokens.to_string()); } // Model optional revision if let Some(ref revision) = args.revision { router_args.push("--revision".to_string()); router_args.push(revision.to_string()) } if args.json_output { router_args.push("--json-output".to_string()); } // OpenTelemetry if let Some(otlp_endpoint) = args.otlp_endpoint { router_args.push("--otlp-endpoint".to_string()); router_args.push(otlp_endpoint); } // CORS origins for origin in args.cors_allow_origin.into_iter() { router_args.push("--cors-allow-origin".to_string()); router_args.push(origin); } // Ngrok if args.ngrok { router_args.push("--ngrok".to_string()); router_args.push("--ngrok-authtoken".to_string()); router_args.push(args.ngrok_authtoken.unwrap()); router_args.push("--ngrok-edge".to_string()); router_args.push(args.ngrok_edge.unwrap()); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; // Parse Compute type if let Ok(compute_type) = env::var("COMPUTE_TYPE") { envs.push(("COMPUTE_TYPE".into(), compute_type.into())) } else if let Some(compute_type) = compute_type(num_shard) { envs.push(("COMPUTE_TYPE".into(), compute_type.into())) } let mut webserver = match Command::new("text-generation-router") .args(router_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { tracing::error!("Failed to start webserver: {}", err); if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-router not found in PATH"); tracing::error!("Please install it with `make install-router`") } else { tracing::error!("{}", err); } shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::WebserverCannotStart); } }; // Redirect STDOUT and STDERR to the console let webserver_stdout = webserver.stdout.take().unwrap(); let webserver_stderr = webserver.stderr.take().unwrap(); thread::spawn(move || { let stdout = BufReader::new(webserver_stdout); let stderr = BufReader::new(webserver_stderr); for line in stdout.lines() { println!("{}", line.unwrap()); } for line in stderr.lines() { println!("{}", line.unwrap()); } }); Ok(webserver) } fn terminate(process_name: &str, mut process: Child, timeout: Duration) -> io::Result<ExitStatus> { tracing::info!("Terminating {process_name}"); let terminate_time = Instant::now(); signal::kill(Pid::from_raw(process.id() as i32), Signal::SIGTERM).unwrap(); tracing::info!("Waiting for {process_name} to gracefully shutdown"); while terminate_time.elapsed() < timeout { if let Some(status) = process.try_wait()? 
{ tracing::info!("{process_name} terminated"); return Ok(status); } sleep(Duration::from_millis(100)); } tracing::info!("Killing {process_name}"); process.kill()?; let exit_status = process.wait()?; tracing::info!("{process_name} killed"); Ok(exit_status) } fn main() -> Result<(), LauncherError> { // Pattern match configuration let args: Args = Args::parse(); // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); if args.json_output { tracing_subscriber::fmt() .with_env_filter(env_filter) .json() .init(); } else { tracing_subscriber::fmt() .with_env_filter(env_filter) .compact() .init(); } if args.env { let env_runtime = env_runtime::Env::new(); tracing::info!("{}", env_runtime); } tracing::info!("{:?}", args); // Validate args if args.max_input_length >= args.max_total_tokens { return Err(LauncherError::ArgumentValidation( "`max_input_length` must be < `max_total_tokens`".to_string(), )); } if args.max_input_length as u32 > args.max_batch_prefill_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {} and {}", args.max_batch_prefill_tokens, args.max_input_length ))); } if args.validation_workers == 0 { return Err(LauncherError::ArgumentValidation( "`validation_workers` must be > 0".to_string(), )); } if args.trust_remote_code { tracing::warn!( "`trust_remote_code` is set. Trusting that model `{}` do not contain malicious code.", args.model_id ); } let num_shard = find_num_shards(args.sharded, args.num_shard)?; if num_shard > 1 { tracing::info!("Sharding model on {num_shard} processes"); } if let Some(ref max_batch_total_tokens) = args.max_batch_total_tokens { if args.max_batch_prefill_tokens > *max_batch_total_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}", args.max_batch_prefill_tokens, max_batch_total_tokens ))); } if args.max_total_tokens as u32 > *max_batch_total_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_total_tokens` must be <= `max_batch_total_tokens`. 
Given: {} and {}", args.max_total_tokens, max_batch_total_tokens ))); } } if args.ngrok { if args.ngrok_authtoken.is_none() { return Err(LauncherError::ArgumentValidation( "`ngrok-authtoken` must be set when using ngrok tunneling".to_string(), )); } if args.ngrok_edge.is_none() { return Err(LauncherError::ArgumentValidation( "`ngrok-edge` must be set when using ngrok tunneling".to_string(), )); } } // Signal handler let running = Arc::new(AtomicBool::new(true)); let r = running.clone(); ctrlc::set_handler(move || { r.store(false, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); // Download and convert model weights download_convert_model(&args, running.clone())?; if !running.load(Ordering::SeqCst) { // Launcher was asked to stop return Ok(()); } // Shared shutdown bool let shutdown = Arc::new(AtomicBool::new(false)); // Shared shutdown channel // When shutting down, the main thread will wait for all senders to be dropped let (shutdown_sender, shutdown_receiver) = mpsc::channel(); // Shared channel to track shard status let (status_sender, status_receiver) = mpsc::channel(); spawn_shards( num_shard, &args, shutdown.clone(), &shutdown_receiver, shutdown_sender, &status_receiver, status_sender, running.clone(), )?; // We might have received a termination signal if !running.load(Ordering::SeqCst) { shutdown_shards(shutdown, &shutdown_receiver); return Ok(()); } let mut webserver = spawn_webserver(num_shard, args, shutdown.clone(), &shutdown_receiver) .map_err(|err| { shutdown_shards(shutdown.clone(), &shutdown_receiver); err })?; // Default exit code let mut exit_code = Ok(()); while running.load(Ordering::SeqCst) { if let Ok(ShardStatus::Failed(rank)) = status_receiver.try_recv() { tracing::error!("Shard {rank} crashed"); exit_code = Err(LauncherError::ShardFailed); break; }; match webserver.try_wait().unwrap() { Some(_) => { tracing::error!("Webserver Crashed"); shutdown_shards(shutdown, &shutdown_receiver); return Err(LauncherError::WebserverFailed); } None => { sleep(Duration::from_millis(100)); } }; } // Graceful termination terminate("webserver", webserver, Duration::from_secs(90)).unwrap(); shutdown_shards(shutdown, &shutdown_receiver); exit_code }
text-generation-inference/launcher/src/main.rs/0
{ "file_path": "text-generation-inference/launcher/src/main.rs", "repo_id": "text-generation-inference", "token_count": 19492 }
199
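A note for readers of the launcher code above: the `PythonLogMessage` structs deserialize JSON log lines shaped like {"text": ..., "record": {"level": {"name": ...}}}, and `log_lines` falls back to a plain debug message when a line does not parse. The following Python sketch is an editorial illustration of that contract (the helper names are hypothetical and not part of the repository):

import json

def make_log_line(text: str, level: str = "INFO") -> str:
    # Mirror the fields the Rust `PythonLogMessage` deserializer reads;
    # extra keys in a real serialized log record would simply be ignored.
    return json.dumps({"text": text, "record": {"level": {"name": level}}})

def route_log_line(line: str) -> str:
    # Same fallback behaviour as `log_lines`: if the line is not JSON of the
    # expected shape, treat it as a plain debug message.
    try:
        msg = json.loads(line)
        return f'[{msg["record"]["level"]["name"]}] {msg["text"]}'
    except (json.JSONDecodeError, KeyError, TypeError):
        return f"[DEBUG] {line}"

if __name__ == "__main__":
    print(route_log_line(make_log_line("Shard ready", "INFO")))  # [INFO] Shard ready
    print(route_log_line("plain, non-JSON stderr output"))       # [DEBUG] plain, non-JSON stderr output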
//! A crate to extract and inject a OpenTelemetry context from and to a gRPC request. //! Inspired by: https://github.com/open-telemetry/opentelemetry-rust gRPC examples use opentelemetry::global; use opentelemetry::propagation::{Extractor, Injector}; use tracing_opentelemetry::OpenTelemetrySpanExt; /// Extract context metadata from a gRPC request's metadata struct MetadataExtractor<'a>(pub &'a tonic::metadata::MetadataMap); impl<'a> Extractor for MetadataExtractor<'a> { /// Get a value for a key from the MetadataMap. If the value can't be converted to &str, returns None fn get(&self, key: &str) -> Option<&str> { self.0.get(key).and_then(|metadata| metadata.to_str().ok()) } /// Collect all the keys from the MetadataMap. fn keys(&self) -> Vec<&str> { self.0 .keys() .map(|key| match key { tonic::metadata::KeyRef::Ascii(v) => v.as_str(), tonic::metadata::KeyRef::Binary(v) => v.as_str(), }) .collect::<Vec<_>>() } } /// Inject context in the metadata of a gRPC request. struct MetadataInjector<'a>(pub &'a mut tonic::metadata::MetadataMap); impl<'a> Injector for MetadataInjector<'a> { /// Set a key and value in the MetadataMap. Does nothing if the key or value are not valid inputs fn set(&mut self, key: &str, value: String) { if let Ok(key) = tonic::metadata::MetadataKey::from_bytes(key.as_bytes()) { if let Ok(val) = value.parse() { self.0.insert(key, val); } } } } /// Get a context from the global context and inject the span into a gRPC request's metadata. fn inject(metadata: &mut tonic::metadata::MetadataMap) { global::get_text_map_propagator(|propagator| { propagator.inject_context( &tracing::Span::current().context(), &mut MetadataInjector(metadata), ) }) } pub trait InjectTelemetryContext { fn inject_context(self) -> Self; } impl<T> InjectTelemetryContext for tonic::Request<T> { fn inject_context(mut self) -> Self { inject(self.metadata_mut()); self } }
text-generation-inference/router/grpc-metadata/src/lib.rs/0
{ "file_path": "text-generation-inference/router/grpc-metadata/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 889 }
200
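The grpc-metadata crate above follows the usual propagation carrier pattern: an Extractor reads string-keyed headers out of tonic's MetadataMap and an Injector writes them back before a request is sent. As an editorial, language-neutral illustration (hypothetical names, not part of the upstream crate), a dict-backed carrier exposes the same get/set/keys surface:

from typing import Dict, List, Optional

class DictCarrier:
    """A minimal string-keyed carrier, standing in for tonic's MetadataMap."""

    def __init__(self) -> None:
        self._data: Dict[str, str] = {}

    # Injector side: a propagator writes headers such as "traceparent" here.
    def set(self, key: str, value: str) -> None:
        self._data[key.lower()] = value

    # Extractor side: a propagator reads the same headers back on the server.
    def get(self, key: str) -> Optional[str]:
        return self._data.get(key.lower())

    def keys(self) -> List[str]:
        return list(self._data.keys())

if __name__ == "__main__":
    carrier = DictCarrier()
    # Example W3C trace-context value a propagator might inject.
    carrier.set("traceparent", "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01")
    print(carrier.keys())
    print(carrier.get("traceparent"))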
vllm-cuda: # Clone vllm pip install -U ninja packaging --no-cache-dir git clone https://github.com/vllm-project/vllm.git vllm build-vllm-cuda: vllm-cuda cd vllm && git fetch && git checkout f8a1e39fae05ca610be8d5a78be9d40f5274e5fc cd vllm && python setup.py build install-vllm-cuda: build-vllm-cuda pip uninstall vllm -y || true cd vllm && python setup.py install vllm-rocm: # Clone vllm pip install -U ninja packaging --no-cache-dir git clone https://github.com/fxmarty/vllm-public.git vllm build-vllm-rocm: vllm-rocm cd vllm && git fetch && git checkout ad9b7c4095ef54419a0533d254f2ad84bd2dfcae cd vllm && python setup.py build install-vllm-rocm: build-vllm-rocm pip uninstall vllm -y || true cd vllm && python setup.py install
text-generation-inference/server/Makefile-vllm/0
{ "file_path": "text-generation-inference/server/Makefile-vllm", "repo_id": "text-generation-inference", "token_count": 332 }
201
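The Makefile above pins the vllm builds to exact upstream commits. As a small editorial aid (hypothetical helper, not part of the repository), the pinned revisions can be extracted programmatically, for example to cross-check them in CI:

import re
from typing import List

def pinned_commits(makefile_text: str) -> List[str]:
    # Matches lines of the form: cd vllm && git fetch && git checkout <sha>
    return re.findall(r"git checkout ([0-9a-f]{40})", makefile_text)

if __name__ == "__main__":
    sample = "cd vllm && git fetch && git checkout f8a1e39fae05ca610be8d5a78be9d40f5274e5fc\n"
    print(pinned_commits(sample))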
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _matrix_cuh #define _matrix_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> class MatrixView_half { public: const half* data; const int height; const int width; __device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; } __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; } __device__ __forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); } __device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; } }; class MatrixView_half_rw { public: half* data; const int height; const int width; __device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; } __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; } __device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; } __device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; } __device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; } }; class MatrixView_q4_row { public: const uint32_t* data; const int height; const int width; __device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ int item(int row, int column) const { int shift = (column & 0x07) * 4; return (data[row * width / 8 + column / 8] >> shift) & 0x0f; } }; class MatrixView_q4_column { public: const uint32_t* data; const int height; const int width; __device__ __forceinline__ MatrixView_q4_column(const uint32_t* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ int item(int row, int column) const { int shift = (row & 0x07) * 4; return (data[row / 8 * width + column] >> shift) & 0x0f; } __device__ __forceinline__ uint32_t item_uint32_t(int row, int column) { return data[row / 8 * width + column]; } __device__ __forceinline__ const uint32_t* item_uint32_ptr(int row, int column) { return &data[row / 8 * width + column]; } }; // TODO: Rewrite all these dot product functions using functors or something, move to q4_matmul.cu // Accumulated dot product of 8-element row vectors in h and quantized column vectors in v, constant zero/scale __device__ __forceinline__ half2 dot_product_8 ( const half2 acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half2 v_scale_2, const uint32_t v_zero, // + 1 (!!) 
const int count ) { const half2* h_ptr = (const half2*) h_.item_ptr(h_row, h_column); const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half2 result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half2 v_01 = __halves2half2(v_0, v_1); half2 v_23 = __halves2half2(v_2, v_3); half2 v_45 = __halves2half2(v_4, v_5); half2 v_67 = __halves2half2(v_6, v_7); // half2 v_01 = q4_table[v_zero - 1][(v_read ) & 0xff]; // (constant memory is too slow apparently) // half2 v_23 = q4_table[v_zero - 1][(v_read >> 8) & 0xff]; // half2 v_45 = q4_table[v_zero - 1][(v_read >> 16) & 0xff]; // half2 v_67 = q4_table[v_zero - 1][(v_read >> 24) ]; half2 tmp = __hmul2(*h_ptr++, v_01); tmp = __hfma2(*h_ptr++, v_23, tmp); tmp = __hfma2(*h_ptr++, v_45, tmp); tmp = __hfma2(*h_ptr++, v_67, tmp); result = __hfma2(v_scale_2, tmp, result); } return result; } __device__ __forceinline__ half dot_product_8_h ( const half acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half v_scale, const uint32_t v_zero, // + 1 (!!) const int count ) { const half* h_ptr = h_.item_ptr(h_row, h_column); const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half tmp = __hmul(*h_ptr++, v_0); tmp = __hfma(*h_ptr++, v_1, tmp); tmp = __hfma(*h_ptr++, v_2, tmp); tmp = __hfma(*h_ptr++, v_3, tmp); tmp = __hfma(*h_ptr++, v_4, tmp); tmp = __hfma(*h_ptr++, v_5, tmp); tmp = __hfma(*h_ptr++, v_6, tmp); tmp = __hfma(*h_ptr++, v_7, tmp); result = __hfma(v_scale, tmp, result); } return result; } // Accumulated dot product of 8-element row vectors in h and quantized column vectors in v, constant zero/scale, with x_map __device__ __forceinline__ half2 dot_product_8_x_map ( const half2 acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half2 v_scale_2, const uint32_t v_zero, // + 1 (!!) 
const int count, const uint32_t* x_map ) { const half* h_ptr = h_.item_ptr(h_row, 0); const uint32_t* x_map_ptr = x_map + h_column; const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half2 result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half2 v_01 = __halves2half2(v_0, v_1); half2 v_23 = __halves2half2(v_2, v_3); half2 v_45 = __halves2half2(v_4, v_5); half2 v_67 = __halves2half2(v_6, v_7); half h_0 = h_ptr[*x_map_ptr++]; half h_1 = h_ptr[*x_map_ptr++]; half h_2 = h_ptr[*x_map_ptr++]; half h_3 = h_ptr[*x_map_ptr++]; half h_4 = h_ptr[*x_map_ptr++]; half h_5 = h_ptr[*x_map_ptr++]; half h_6 = h_ptr[*x_map_ptr++]; half h_7 = h_ptr[*x_map_ptr++]; half2 h_01 = __halves2half2(h_0, h_1); half2 h_23 = __halves2half2(h_2, h_3); half2 h_45 = __halves2half2(h_4, h_5); half2 h_67 = __halves2half2(h_6, h_7); half2 tmp = __hmul2(h_01, v_01); tmp = __hfma2(h_23, v_23, tmp); tmp = __hfma2(h_45, v_45, tmp); tmp = __hfma2(h_67, v_67, tmp); result = __hfma2(v_scale_2, tmp, result); } return result; } __device__ __forceinline__ half dot_product_8_x_map_h ( const half acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half v_scale, const uint32_t v_zero, // + 1 (!!) const int count, const uint32_t* x_map ) { const half* h_ptr = h_.item_ptr(h_row, 0); const uint32_t* x_map_ptr = x_map + h_column; const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half tmp = __hmul(h_ptr[*x_map_ptr++], v_0); tmp = __hfma(h_ptr[*x_map_ptr++], v_1, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_2, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_3, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_4, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_5, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_6, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_7, tmp); result = __hfma(v_scale, tmp, result); } return result; } #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/matrix.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/matrix.cuh", "repo_id": "text-generation-inference", "token_count": 5380 }
202
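For readers following the dot-product kernels in matrix.cuh: each uint32 packs eight 4-bit weights, and every nibble is recovered as (nibble - zero) * scale before being multiplied with the half-precision activations. The sketch below is an editorial illustration of that packing and dequantization arithmetic (hypothetical helper names, not part of the kernel sources):

from typing import List

def pack_q4(nibbles: List[int]) -> int:
    # nibbles[0] lands in the lowest 4 bits, matching `(data >> shift) & 0x0f`
    # with shift = (index & 0x07) * 4 in MatrixView_q4_row::item.
    assert len(nibbles) == 8 and all(0 <= n <= 0xF for n in nibbles)
    word = 0
    for i, n in enumerate(nibbles):
        word |= n << (4 * i)
    return word

def dequant_q4(word: int, zero: int, scale: float) -> List[float]:
    # Mirrors the per-nibble unpacking in dot_product_8: shift, mask, recentre, rescale.
    return [(((word >> (4 * i)) & 0x0F) - zero) * scale for i in range(8)]

if __name__ == "__main__":
    q = pack_q4([0, 1, 2, 3, 4, 5, 6, 7])
    print(hex(q))                         # 0x76543210
    print(dequant_q4(q, zero=8, scale=0.5))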
#ifndef _qdq_4_cuh #define _qdq_4_cuh #include "qdq_util.cuh" #include "../../config.h" #if QMODE_4BIT == 1 // Permutation: // // 77775555 33331111 66664444 22220000 __forceinline__ __device__ void shuffle_4bit_8 ( uint32_t* q, int stride ) { uint32_t qa = q[0]; uint32_t qb = 0; #pragma unroll for (int i = 0; i < 4; i++) { uint32_t qa0 = qa & 0x0f; uint32_t qa1 = (qa & 0xf0) >> 4; qa >>= 8; qb |= (qa1 << (i * 4 + 16)); qb |= (qa0 << (i * 4)); } q[0] = qb; } __forceinline__ __device__ void dequant_4bit_8 ( const uint32_t q_0, half2 (&dq)[4], int stride ) { const uint32_t c0 = 0x64006400; const half y16_ = __float2half_rn(1.0f / 16.0f); const half2 y16 = __halves2half2(y16_, y16_); const half z1_ = __float2half_rn(-1024.0f - 8.0f); const half z16_ = __float2half_rn(-1024.0f / 16.0f - 8.0f); const half2 z1 = __halves2half2(z1_, z1_); const half2 z16 = __halves2half2(z16_, z16_); uint32_t qa = q_0; half2_uint32 q0((qa & 0x000f000f) | c0); // half2(q[ 0], q[ 1]) + 1024 half2_uint32 q1((qa & 0x00f000f0) | c0); // half2(q[ 2], q[ 3]) * 16 + 1024 qa >>= 8; half2_uint32 q2((qa & 0x000f000f) | c0); // half2(q[ 4], q[ 5]) + 1024 half2_uint32 q3((qa & 0x00f000f0) | c0); // half2(q[ 6], q[ 7]) * 16 + 1024 dq[0] = __hadd2(q0.as_half2, z1); dq[1] = __hfma2(q1.as_half2, y16, z16); dq[2] = __hadd2(q2.as_half2, z1); dq[3] = __hfma2(q3.as_half2, y16, z16); } __forceinline__ __device__ void dequant_4bit_8_prep_zero_scale ( const uint32_t zero, const half scale, half2 (&z1z16)[2], half2 (&y1y16)[2] ) { half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero); half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero)); half2 scale2 = __half2half2(scale); z1z16[0] = __hmul2(scale2, __half2half2(z1.as_half)); z1z16[1] = __hmul2(scale2, __half2half2(z16)); const half y1 = __float2half_rn(1.0f); const half y16 = __float2half_rn(1.0f / 16.0f); y1y16[0] = __hmul2(scale2, __half2half2(y1)); y1y16[1] = __hmul2(scale2, __half2half2(y16)); } __forceinline__ __device__ void dequant_4bit_8_prep_zero ( const uint32_t zero, half2(&z1z16)[2], half2(&y1y16)[2] ) { half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero); half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero)); z1z16[0] = __half2half2(z1.as_half); z1z16[1] = __half2half2(z16); const half y1 = __float2half_rn(1.0f); const half y16 = __float2half_rn(1.0f / 16.0f); y1y16[0] = __half2half2(y1); y1y16[1] = __half2half2(y16); } __forceinline__ __device__ void dequant_4bit_8_gptq ( const uint32_t q_0, half2 (&dq)[4], half2 (&z1z16)[2], half2 (&y1y16)[2], int stride, bool scaled ) { const uint32_t c0 = 0x64006400; uint32_t qa = q_0; half2_uint32 q0((qa & 0x000f000f) | c0); // half2( q[0] + 1024, q[1] + 1024 ) half2_uint32 q1((qa & 0x00f000f0) | c0); // half2( q[2] * 16 + 1024, q[3] * 16 + 1024 ) qa >>= 8; half2_uint32 q2((qa & 0x000f000f) | c0); // half2( q[4] + 1024, q[5] + 1024 ) half2_uint32 q3((qa & 0x00f000f0) | c0); // half2( q[6] * 16 + 1024, q[7] * 16 + 1024 ) if (scaled) { dq[0] = __hfma2(q0.as_half2, y1y16[0], z1z16[0]); // half2( q[0] * s - z * s, q[1] * s - z * s) dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] * s - z * s, q[3] * s - z * s) dq[2] = __hfma2(q2.as_half2, y1y16[0], z1z16[0]); dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); } else { dq[0] = __hadd2(q0.as_half2, z1z16[0]); // half2( q[0] - z, q[1] - z ) dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] - z, q[3] - z ) dq[2] = __hadd2(q2.as_half2, z1z16[0]); // half2( q[4] - z, q[5] - z ) dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); // half2( q[6] - z, 
q[7] - z ) } } #else __forceinline__ __device__ void shuffle_4bit_8 ( uint32_t* q, int stride ) { } __forceinline__ __device__ void dequant_4bit_8 ( const uint32_t q_0, half2 (&dq)[4], int stride ) { half dqh[8]; for (int i = 0; i < 8; i++) dqh[i] = dq_ns(exb(q_0, i * 4, 0x0f), 8); for (int i = 0; i < 4; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]); } __forceinline__ __device__ void dequant_4bit_8_prep_zero_scale ( const uint32_t zero, const half scale, half2 (&z1)[2], half2 (&y1)[2] ) { half z = __int2half_rn(-((int)zero)); z = __hmul(z, scale); z1[0] = __half2half2(z); y1[0] = __half2half2(scale); } __forceinline__ __device__ void dequant_4bit_8_prep_zero ( const uint32_t zero, half2(&z1)[2], half2(&y1)[2] ) { half z = __int2half_rn(-((int)zero)); z1[0] = __half2half2(z); } __forceinline__ __device__ void dequant_4bit_8_gptq ( const uint32_t q_0, half2 (&dq)[4], half2 (&z1)[2], half2 (&y1)[2], int stride, bool scaled ) { half2 dqh2[8]; uint32_t qa = q_0; for (int i = 0; i < 4; i++) { half d0 = __int2half_rn(qa & 0x0f); qa >>= 4; half d1 = __int2half_rn(qa & 0x0f); qa >>= 4; dqh2[i] = __halves2half2(d0, d1); } if (scaled) { dq[0] = __hfma2(dqh2[0], y1[0], z1[0]); dq[1] = __hfma2(dqh2[1], y1[0], z1[0]); dq[2] = __hfma2(dqh2[2], y1[0], z1[0]); dq[3] = __hfma2(dqh2[3], y1[0], z1[0]); } else { dq[0] = __hadd2(dqh2[0], z1[0]); dq[1] = __hadd2(dqh2[1], z1[0]); dq[2] = __hadd2(dqh2[2], z1[0]); dq[3] = __hadd2(dqh2[3], z1[0]); } } #endif #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_4.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_4.cuh", "repo_id": "text-generation-inference", "token_count": 3278 }
203
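The shuffle_4bit_8 permutation above (commented as 77775555 33331111 66664444 22220000) interleaves nibbles so that the 0x000f000f and 0x00f000f0 masks in dequant_4bit_8 later extract adjacent logical pairs (q0, q1), (q2, q3), and so on. An editorial Python re-derivation of that permutation (not part of the kernel sources):

def shuffle_4bit_8(word: int) -> int:
    # Even-indexed nibbles go to the low 16 bits, odd-indexed nibbles to the
    # high 16 bits, exactly as in the CUDA routine above.
    qa, qb = word, 0
    for i in range(4):
        qa0 = qa & 0x0F          # even-indexed nibble
        qa1 = (qa & 0xF0) >> 4   # odd-indexed nibble
        qa >>= 8
        qb |= qa0 << (i * 4)
        qb |= qa1 << (i * 4 + 16)
    return qb

if __name__ == "__main__":
    # Pack nibble i with value i so the permutation is visible in hex.
    packed = sum(i << (4 * i) for i in range(8))
    print(hex(packed))                  # 0x76543210
    print(hex(shuffle_4bit_8(packed)))  # 0x75316420, i.e. 7 5 3 1 | 6 4 2 0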
import pytest import torch from transformers import AutoTokenizer from text_generation_server.models import Model def get_test_model(): class TestModel(Model): def batch_type(self): raise NotImplementedError def generate_token(self, batch): raise NotImplementedError tokenizer = AutoTokenizer.from_pretrained("huggingface/llama-7b") model = TestModel( torch.nn.Linear(1, 1), tokenizer, False, torch.float32, torch.device("cpu") ) return model @pytest.mark.private def test_decode_streaming_english_spaces(): model = get_test_model() truth = "Hello here, this is a simple test" all_input_ids = [15043, 1244, 29892, 445, 338, 263, 2560, 1243] assert ( all_input_ids == model.tokenizer(truth, add_special_tokens=False)["input_ids"] ) decoded_text = "" offset = 0 token_offset = 0 for i in range(len(all_input_ids)): text, offset, token_offset = model.decode_token( all_input_ids[: i + 1], offset, token_offset ) decoded_text += text assert decoded_text == truth @pytest.mark.private def test_decode_streaming_chinese_utf8(): model = get_test_model() truth = "我很感谢你的热情" all_input_ids = [ 30672, 232, 193, 139, 233, 135, 162, 235, 179, 165, 30919, 30210, 234, 134, 176, 30993, ] decoded_text = "" offset = 0 token_offset = 0 for i in range(len(all_input_ids)): text, offset, token_offset = model.decode_token( all_input_ids[: i + 1], offset, token_offset ) decoded_text += text assert decoded_text == truth
text-generation-inference/server/tests/models/test_model.py/0
{ "file_path": "text-generation-inference/server/tests/models/test_model.py", "repo_id": "text-generation-inference", "token_count": 829 }
204
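The streaming-decode tests above, in particular the Chinese UTF-8 case, exist because decoding token pieces independently can split a multi-byte character. A simplified, tokenizer-free sketch of the underlying idea, decoding a growing window and emitting only the newly stable suffix, is shown below (editorial, hypothetical helpers):

# Byte pieces that deliberately split the 3-byte character "我".
pieces = ["我".encode()[:2], "我".encode()[2:] + "很".encode()]

def naive_stream(chunks):
    # Decoding each piece on its own corrupts the split character.
    return "".join(c.decode("utf-8", errors="replace") for c in chunks)

def windowed_stream(chunks):
    # Decode the full prefix each step and emit only what was not emitted before.
    out, buf, emitted = [], b"", 0
    for c in chunks:
        buf += c
        text = buf.decode("utf-8", errors="ignore")
        out.append(text[emitted:])
        emitted = len(text)
    return "".join(out)

if __name__ == "__main__":
    print(naive_stream(pieces))     # contains U+FFFD replacement characters
    print(windowed_stream(pieces))  # 我很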
# coding=utf-8 # Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GPTNeoX model.""" from typing import Optional, Tuple, Union import os import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers import GPTNeoXConfig from loguru import logger from text_generation_server.utils.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, TensorParallelHead, ) CUSTOM_KERNELS_ENABLED = False if ( torch.cuda.is_available() and not os.environ.get("DISABLE_CUSTOM_KERNELS", "False") == "True" ): try: from custom_kernels import fused_attention_cuda CUSTOM_KERNELS_ENABLED = True except ImportError: pass if not CUSTOM_KERNELS_ENABLED: logger.warning("We're not using custom kernels.") def make_causal_mask( input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int ) -> torch.BoolTensor: """ Make causal mask used for self-attention. """ batch_size, target_length = input_ids_shape mask = torch.ones( (target_length, target_length + past_key_values_length), dtype=torch.bool, device=device, ) mask = mask.triu(1 + past_key_values_length) expanded_mask = mask.unsqueeze(0).expand( batch_size, target_length, target_length + past_key_values_length ) return expanded_mask def expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: """ Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`. 
""" batch_size, src_length = mask.shape tgt_length = tgt_length if tgt_length is not None else src_length expanded_mask = ~(mask[:, None, :].to(torch.bool)) return expanded_mask.expand(batch_size, tgt_length, src_length) def prepare_attn_mask( attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int, ) -> torch.BoolTensor: # create causal mask # [batch_size, seq_length] -> [batch_size, tgt_length, src_length] combined_attention_mask = None device = attention_mask.device _, src_length = input_shape if src_length > 1: combined_attention_mask = make_causal_mask( input_shape, device=device, past_key_values_length=past_key_values_length ) # [batch_size, seq_length] -> [batch_size, tgt_length, src_length] expanded_attn_mask = expand_mask(attention_mask, tgt_length=src_length) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask ) return combined_attention_mask class GPTNeoXPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ class GPTNeoXAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_attention_heads self.rotary_ndims = int(self.head_size * config.rotary_pct) max_positions = config.max_position_embeddings # ??? TODO # self.register_buffer( # "bias", # torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( # 1, 1, max_positions, max_positions # ), # ) # self.register_buffer("masked_bias", torch.tensor(-1e9)) self.rotary_emb = RotaryEmbedding( self.rotary_ndims, config.max_position_embeddings, base=config.rotary_emb_base, ) self.rotary_emb.inv_freq = nn.Parameter( weights.get_tensor(f"{prefix}.rotary_emb.inv_freq") ) self.inv_norm_factor = 1.0 / torch.sqrt( torch.tensor(self.head_size, dtype=torch.float32) ).to(torch.get_default_dtype()) if self.num_attention_heads % weights.process_group.size() != 0: raise ValueError( f"`num_attention_heads` must be divisible by `num_shards` " f"(got `num_attention_heads`: {self.num_attention_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_attention_heads = ( self.num_attention_heads // weights.process_group.size() ) self.query_key_value = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.query_key_value", weights=weights, bias=True ) self.dense = TensorParallelRowLinear.load( config, prefix=f"{prefix}.dense", weights=weights, bias=True ) def forward( self, hidden_states, position_ids, attention_mask, head_mask=None, layer_past=None, use_cache=False, output_attentions=False, ): has_layer_past = layer_past is not None # Compute QKV # Attention heads [batch, seq_len, hidden_size] # --> [batch, seq_len, (np * 3 * head_size)] qkv = self.query_key_value(hidden_states) # [batch, seq_len, (num_heads * 3 * head_size)] # --> [batch, seq_len, num_heads, 3 * head_size] new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape).permute(0, 2, 1, 3) # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size] query, key, value = qkv.split(self.head_size, -1) # Compute token offset for rotary embeddings (when decoding) seq_len = key.shape[-2] if has_layer_past: seq_len += layer_past[0].shape[-2] # Compute rotary embeddings on 
rotary_ndims query_rot = query[..., : self.rotary_ndims] key_rot = key[..., : self.rotary_ndims] query_rot, key_rot = self.rotary_emb(query_rot, key_rot, position_ids, seq_len) query[..., : self.rotary_ndims] = query_rot key[..., : self.rotary_ndims] = key_rot if CUSTOM_KERNELS_ENABLED: attn_output, present, attn_weights = fused_attention_cuda.forward( query, key, value, layer_past, attention_mask, head_mask, self.inv_norm_factor, self.num_attention_heads, use_cache, ) else: # Cache QKV values if has_layer_past: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) present = (key, value) if use_cache else None # Compute attention attn_output, attn_weights = self._attn( query, key, value, attention_mask, head_mask ) # Reshape outputs attn_output = self._merge_heads( attn_output, self.num_attention_heads, self.head_size ) attn_output = self.dense(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs @classmethod def _split_heads(cls, tensor, num_attention_heads, attn_head_size): """ Splits hidden dim into attn_head_size and num_attention_heads """ # tensor: [bs, seq_len, hidden_size] new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) # -> [bs, seq_len, num_attention_heads, attn_head_size] tensor = tensor.view(new_shape) # -> [bs, num_attention_heads, seq_len, attn_head_size] tensor = tensor.permute(0, 2, 1, 3) return tensor @classmethod def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden dim """ # tensor [bs, num_attention_heads, seq_len, attn_head_size] tensor = tensor.permute(0, 2, 1, 3).contiguous() # -> [bs, seq_len, num_attention_heads, attn_head_size] tensor = tensor.view( tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size ) # -> [bs, seq_len, hidden_size] return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size] # compute causal mask from causal mask buffer batch_size, num_attention_heads, query_length, attn_head_size = query.size() key_length = key.size(-2) query = query.reshape( batch_size * num_attention_heads, query_length, attn_head_size ) key = key.reshape(batch_size * num_attention_heads, key_length, attn_head_size) attn_scores = torch.zeros( 1, dtype=query.dtype, device=key.device, ).expand(batch_size * num_attention_heads, query_length, key_length) attn_scores = torch.baddbmm( attn_scores, query, key.transpose(1, 2), beta=1.0, alpha=self.inv_norm_factor, ) # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length] input_dtype = attn_scores.dtype if input_dtype in [torch.float16, torch.bfloat16]: attn_scores = attn_scores.to(torch.float) attn_scores = torch.where( attention_mask, torch.finfo(attn_scores.dtype).min, attn_scores ) attn_scores = attn_scores.view( batch_size, num_attention_heads, query_length, key_length ) attn_weights = nn.functional.softmax(attn_scores, dim=-1) attn_weights = attn_weights.to(value.dtype) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights class RotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings, base=10000, device=None): super().__init__() self.true_inv_freq = 1.0 / ( base ** 
(torch.arange(0, dim, 2).float().to(device) / dim) ) self.register_buffer("inv_freq", self.true_inv_freq) # Build here to make `torch.jit.trace` work. self.max_seq_len_cached = max_position_embeddings self.cos_cached = None self.sin_cached = None @staticmethod def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) @staticmethod def _create_cos_sin(inv_freq, max_position_embeddings, dtype, device): t = torch.arange( max_position_embeddings, device=inv_freq.device, dtype=inv_freq.dtype ) freqs = torch.einsum("i,j->ij", t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) return emb.cos().to(device).to(dtype), emb.sin().to(device).to(dtype) def forward(self, q, k, position_ids, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if ( seq_len > self.max_seq_len_cached or self.cos_cached is None or self.sin_cached is None ): if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len self.cos_cached, self.sin_cached = self._create_cos_sin( self.true_inv_freq, self.max_seq_len_cached, q.dtype, q.device ) return rotary_forward(q, k, self.cos_cached, self.sin_cached, position_ids) @torch.jit.script def rotary_forward(q, k, cos, sin, position_ids): cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) chunk_size = q.shape[-1] // 2 q1, q2 = q.split(chunk_size, -1) q_rotated = torch.cat((-q2, q1), dim=-1) k1, k2 = k.split(chunk_size, -1) k_rotated = torch.cat((-k2, k1), dim=-1) q_embed = (q * cos) + (q_rotated * sin) k_embed = (k * cos) + (k_rotated * sin) return q_embed, k_embed class GPTNeoXMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.act = ( ACT2FN[config.hidden_act] if "gelu_fast" not in config.hidden_act else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) self.dense_h_to_4h = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=True ) self.dense_4h_to_h = TensorParallelRowLinear.load( config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=True ) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class GPTNeoXLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = nn.LayerNorm.load( prefix=f"gpt_neox.layers.{layer_id}.input_layernorm", weights=weights, eps=config.layer_norm_eps, ) self.post_attention_layernorm = nn.LayerNorm.load( prefix=f"gpt_neox.layers.{layer_id}.post_attention_layernorm", weights=weights, eps=config.layer_norm_eps, ) self.attention = GPTNeoXAttention( config, prefix=f"gpt_neox.layers.{layer_id}.attention", weights=weights ) self.mlp = GPTNeoXMLP( config, prefix=f"gpt_neox.layers.{layer_id}.mlp", weights=weights ) def forward( self, hidden_states, position_ids, attention_mask=None, head_mask=None, use_cache=False, layer_past=None, output_attentions=False, ): attention_layer_outputs = self.attention( self.input_layernorm(hidden_states), attention_mask=attention_mask, position_ids=position_ids, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attention_layer_outputs[ 0 ] # output_attn: attn_output, 
present, (attn_weights) outputs = attention_layer_outputs[1:] if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) hidden_states = mlp_output + attn_output + hidden_states else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) attn_output = attn_output + hidden_states mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) hidden_states = mlp_output + attn_output if use_cache: outputs = ( hidden_states, ) + outputs # hidden_states, present, (attn_weights) else: outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights) return outputs class GPTNeoXModel(GPTNeoXPreTrainedModel): def __init__(self, config, weights): super().__init__(config) self.config = config self.num_attention_heads = config.num_attention_heads self.embed_in = TensorParallelEmbedding( prefix="gpt_neox.embed_in", weights=weights ) self.layers = nn.ModuleList( [ GPTNeoXLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers) ] ) self.final_layer_norm = nn.LayerNorm.load( prefix="gpt_neox.final_layer_norm", weights=weights, eps=config.layer_norm_eps, ) self.tp_world_size = weights.process_group.size() def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids=None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_length = 0 past_key_values = tuple([None] * self.config.num_hidden_layers) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_length, seq_length + past_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_in(input_ids) hidden_states = inputs_embeds # Attention mask. seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[-1] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), device=hidden_states.device ) else: attention_mask = attention_mask.to(hidden_states.device) causal_mask = prepare_attn_mask( attention_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length, ) assert self.num_attention_heads % self.tp_world_size == 0 block_size = self.num_attention_heads // self.tp_world_size causal_mask = torch.repeat_interleave(causal_mask, block_size, dim=0) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) presents = () if use_cache else None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = layer( hidden_states, position_ids=position_ids, attention_mask=causal_mask, head_mask=head_mask[i], layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.final_layer_norm(hidden_states) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, 
hidden_states=all_hidden_states, attentions=all_attentions, ) class GPTNeoxForCausalLM(GPTNeoXPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config, weights): super().__init__(config) self.gpt_neox = GPTNeoXModel(config, weights) self.embed_out = TensorParallelHead.load( config, prefix="embed_out", weights=weights ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
Returns: Example: ```python >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b") >>> config.is_decoder = True >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) outputs = self.gpt_neox( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] lm_logits = self.embed_out(hidden_states) lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # we are doing next-token prediction; shift prediction scores and input ids by one shift_logits = lm_logits[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1) ) if not return_dict: output = (lm_logits,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithPast( loss=lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs, ): input_shape = input_ids.shape # cut decoder_input_ids if past is used if past_key_values and past_key_values[0] is not None: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "attention_mask": attention_mask, "past_key_values": past_key_values, "position_ids": position_ids, } ) return model_inputs def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple( past_state.index_select(0, beam_idx) for past_state in layer_past[:2] ) + layer_past[2:], ) return reordered_past
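The `use_parallel_residual` branch in `GPTNeoXLayer` above is easier to see stripped of the attention plumbing. The sketch below is not TGI code; `layer_step`, `attn`, `mlp`, `ln1` and `ln2` are hypothetical stand-ins used only to contrast the two residual layouts.

```python
import torch
from torch import nn


def layer_step(x, attn, mlp, ln1, ln2, use_parallel_residual: bool):
    """Minimal sketch of the two residual layouts in GPT-NeoX-style blocks."""
    if use_parallel_residual:
        # x = x + attn(ln1(x)) + mlp(ln2(x)): both branches read the same input
        return x + attn(ln1(x)) + mlp(ln2(x))
    # sequential: x = x + attn(ln1(x)), then x = x + mlp(ln2(x))
    h = x + attn(ln1(x))
    return h + mlp(ln2(h))


# toy check with identity sub-modules shows the two layouts are not equivalent
x = torch.ones(1, 4)
ident = nn.Identity()
print(layer_step(x, ident, ident, ident, ident, True))   # 3 * x
print(layer_step(x, ident, ident, ident, ident, False))  # 4 * x
```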
text-generation-inference/server/text_generation_server/models/custom_modeling/neox_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/neox_modeling.py", "repo_id": "text-generation-inference", "token_count": 14270 }
205
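`prepare_inputs_for_generation` in the NeoX modeling file above rebuilds `position_ids` from the attention mask with a cumulative sum, so left-padded rows still start counting at 0 on their first real token. A standalone illustration with made-up mask values (plain PyTorch, no TGI imports):

```python
import torch

# batch of 2, left-padded to length 5 (0 = padding, 1 = real token)
attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])

position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)  # padding positions get a dummy value

print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```

The dummy value on padded positions never matters, because those tokens are masked out of the attention anyway.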
import inspect import torch from abc import ABC, abstractmethod from typing import List, Tuple, Optional, TypeVar, Type from transformers import PreTrainedTokenizerBase, PretrainedConfig from text_generation_server.models.types import Batch, Generation from text_generation_server.utils.speculate import get_speculate from text_generation_server.pb.generate_pb2 import InfoResponse B = TypeVar("B", bound=Batch) class Model(ABC): def __init__( self, model: torch.nn.Module, tokenizer: PreTrainedTokenizerBase, requires_padding: bool, dtype: torch.dtype, device: torch.device, rank: int = 0, world_size: int = 1, sliding_window: Optional[int] = None, speculate: Optional[int] = None, ): self.model = model.eval() self.tokenizer = tokenizer self.all_special_ids = set(tokenizer.all_special_ids) self.requires_padding = requires_padding self.dtype = dtype self.device = device self.rank = rank self.world_size = world_size self.sliding_window = sliding_window if sliding_window != -1 else None if speculate is None: speculate = get_speculate() self.speculate = speculate self.has_position_ids = ( inspect.signature(model.forward).parameters.get("position_ids", None) is not None ) self.check_initialized() @property def info(self) -> InfoResponse: if self.requires_padding and self.sliding_window is not None: raise NotImplementedError("sliding_window is not implemented with padding") return InfoResponse( requires_padding=self.requires_padding, dtype=str(self.dtype), device_type=self.device.type, window_size=self.sliding_window, speculate=self.speculate, ) @property @abstractmethod def batch_type(self) -> Type[B]: raise NotImplementedError @abstractmethod def generate_token( self, batch: B ) -> Tuple[List[Generation], Optional[B], Tuple[int, int]]: raise NotImplementedError def warmup(self, batch: B) -> Optional[int]: self.generate_token(batch) return None def decode_token( self, all_input_ids: List[int], prefix_offset: int = 0, read_offset: int = 0, skip_special_tokens: bool = False, ) -> Tuple[str, int, int]: """Hack to hopefully support generate_stream for the maximum number of tokenizers""" # The prefix text is necessary only to defeat cleanup algorithms in the decode # which decide to add a space or not depending on the surrounding ids. prefix_text = self.tokenizer.decode( all_input_ids[prefix_offset:read_offset], skip_special_tokens=skip_special_tokens, ) new_text = self.tokenizer.decode( all_input_ids[prefix_offset:], skip_special_tokens=skip_special_tokens ) if len(new_text) > len(prefix_text) and not new_text.endswith("�"): # utf-8 char at the end means it's a potential unfinished byte sequence # from byte fallback tokenization. # If it's in the middle, it's probably a real invalid id generated # by the model new_text = new_text[len(prefix_text) :] return new_text, read_offset, len(all_input_ids) else: return "", prefix_offset, read_offset def check_initialized(self): uninitialized_parameters = [] for n, p in self.model.named_parameters(): if p.data.device == torch.device("meta"): uninitialized_parameters.append(n) if uninitialized_parameters: raise RuntimeError( f"found uninitialized parameters in model {self.__class__.__name__}: {uninitialized_parameters}" )
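`decode_token` above streams text by decoding a short prefix window plus the new ids and emitting only the suffix, which defeats tokenizers that re-insert or drop spaces at sequence boundaries. The helper below is a self-contained illustration; `fake_decode` and its tiny vocabulary are invented for the example and are not part of TGI.

```python
def fake_decode(ids):
    # stand-in for tokenizer.decode(): every id maps to a word piece
    vocab = {0: "Hel", 1: "lo", 2: " wor", 3: "ld"}
    return "".join(vocab[i] for i in ids)


def decode_incremental(all_ids, prefix_offset, read_offset):
    prefix_text = fake_decode(all_ids[prefix_offset:read_offset])
    new_text = fake_decode(all_ids[prefix_offset:])
    if len(new_text) > len(prefix_text) and not new_text.endswith("\ufffd"):
        # emit only the characters produced after `read_offset`
        return new_text[len(prefix_text):], read_offset, len(all_ids)
    return "", prefix_offset, read_offset


print(decode_incremental([0, 1], prefix_offset=0, read_offset=1))     # ('lo', 1, 2)
print(decode_incremental([0, 1, 2], prefix_offset=1, read_offset=2))  # (' wor', 2, 3)
```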
text-generation-inference/server/text_generation_server/models/model.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/model.py", "repo_id": "text-generation-inference", "token_count": 1674 }
206
import os import torch from loguru import logger from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM if os.getenv("USE_FLASH_ATTENTION", "").lower() == "false": raise ImportError("`USE_FLASH_ATTENTION` is false.") if not torch.cuda.is_available(): raise ImportError("CUDA is not available") major, minor = torch.cuda.get_device_capability() is_sm75 = major == 7 and minor == 5 is_sm8x = major == 8 and minor >= 0 is_sm90 = major == 9 and minor == 0 HAS_FLASH_ATTN = False HAS_FLASH_ATTN_V2_CUDA = False HAS_FLASH_ATTN_V2_ROCM = False try: try: import flash_attn_2_cuda except ImportError: architecture_suffix = "" if IS_CUDA_SYSTEM: architecture_suffix = "-cuda" elif IS_ROCM_SYSTEM: architecture_suffix = "-rocm" raise ImportError( "Flash Attention V2 is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`" ) if not (is_sm8x or is_sm90): raise ImportError( f"GPU with CUDA capability {major} {minor} is not supported for " "Flash Attention V2" ) HAS_FLASH_ATTN_V2_CUDA = IS_CUDA_SYSTEM HAS_FLASH_ATTN_V2_ROCM = IS_ROCM_SYSTEM except ImportError as e: try: import flash_attn_cuda except ImportError: raise ImportError( "Flash Attention is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " "or install flash attention with `cd server && make install install-flash-attention`" ) from e if IS_CUDA_SYSTEM and not (is_sm75 or is_sm8x or is_sm90): raise ImportError( f"GPU with CUDA capability {major} {minor} is not supported" ) from e elif IS_ROCM_SYSTEM: for idx in range(torch.cuda.device_count()): if "MI210" not in torch.cuda.get_device_name( idx ) and "MI250" not in torch.cuda.get_device_name(idx): raise ImportError( f"AMD GPU {torch.cuda.get_device_name(idx)} does not support flash-attention" ) logger.warning(f"Unable to use Flash Attention V2: {e}") HAS_FLASH_ATTN = True def attention( q, k, v, out, cu_seqlens, max_s, softmax_scale, window_size_left=-1, ): if window_size_left <= 0 and window_size_left != -1: raise ValueError("`window_size_left` must be > 0 or -1") if HAS_FLASH_ATTN_V2_CUDA: return flash_attn_2_cuda.varlen_fwd( q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, 0.0, softmax_scale, False, True, window_size_left, 0, False, None, ) elif HAS_FLASH_ATTN_V2_ROCM: if window_size_left != -1: raise ValueError( f"RoCm version of Flash Attention v2 does not support window attention (window_size_left != -1, got window_size_left={window_size_left})." ) # RoCm flash API does not take the window_size_left and window_size_right arguments. 
return flash_attn_2_cuda.varlen_fwd( q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, 0.0, softmax_scale, False, True, False, None, ) elif HAS_FLASH_ATTN: if window_size_left != -1: raise NotImplementedError( "window_size_left is only available with flash attn v2" ) # Flash attention v1 requires q, k and v to have the same number of heads if k.shape[1] != q.shape[1]: # MQA expand if k.shape[1] == 1: k = k.expand(-1, q.shape[1], -1) # Grouped attention reshape else: original_shape = k.shape k = ( k.unsqueeze(2) .expand(-1, -1, q.shape[1] // k.shape[1], -1) .reshape(original_shape[0], -1, original_shape[2]) ) if v.shape[1] != q.shape[1]: # MQA expand if v.shape[1] == 1: v = v.expand(-1, q.shape[1], -1) # Grouped attention reshape else: original_shape = v.shape v = ( v.unsqueeze(2) .expand(-1, -1, q.shape[1] // v.shape[1], -1) .reshape(original_shape[0], -1, original_shape[2]) ) return flash_attn_cuda.fwd( q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, 0.0, softmax_scale, False, True, False, 0, None, ) raise NotImplementedError("flash attention is not installed")
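The flash-attention v1 fallback above has to expand multi-query / grouped KV heads so that `k` and `v` carry as many heads as `q`. The expansion is just a broadcast plus reshape, which can be checked in isolation (the shapes below are made up for the example):

```python
import torch

tokens, q_heads, kv_heads, head_dim = 5, 8, 2, 64
k = torch.randn(tokens, kv_heads, head_dim)

# repeat each KV head q_heads // kv_heads times so it lines up with the query heads
k_expanded = (
    k.unsqueeze(2)
    .expand(-1, -1, q_heads // kv_heads, -1)
    .reshape(tokens, q_heads, head_dim)
)

assert k_expanded.shape == (tokens, q_heads, head_dim)
# the first group of query heads all see a copy of KV head 0
assert torch.equal(k_expanded[:, 0], k[:, 0])
assert torch.equal(k_expanded[:, 3], k[:, 0])
assert torch.equal(k_expanded[:, 4], k[:, 1])
```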
text-generation-inference/server/text_generation_server/utils/flash_attn.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/flash_attn.py", "repo_id": "text-generation-inference", "token_count": 2911 }
207
# coding=utf-8 # Copyright 2023 Authors of "A Watermark for Large Language Models" # available at https://arxiv.org/abs/2301.10226 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from transformers import LogitsProcessor from typing import List, Union GAMMA = float(os.getenv("WATERMARK_GAMMA", 0.5)) DELTA = float(os.getenv("WATERMARK_DELTA", 2.0)) class WatermarkLogitsProcessor(LogitsProcessor): def __init__( self, gamma: float = GAMMA, delta: float = DELTA, hash_key: int = 15485863, # just a large prime number to create a rng seed with sufficient bit width device: str = "cpu", ): # watermarking parameters self.gamma = gamma self.delta = delta self.rng = torch.Generator(device=device) self.hash_key = hash_key def _seed_rng(self, input_ids: Union[List[int], torch.LongTensor]): if isinstance(input_ids, list): assert ( len(input_ids) >= 1 ), "requires at least a 1 token prefix sequence to seed rng" prev_token = input_ids[-1] else: assert len(input_ids) == 1 input_ids = input_ids[0] assert ( input_ids.shape[-1] >= 1 ), "requires at least a 1 token prefix sequence to seed rng" prev_token = input_ids[-1].item() self.rng.manual_seed(self.hash_key * prev_token) def _get_greenlist_ids( self, input_ids: Union[List[int], torch.LongTensor], max_value: int, device: torch.device, ) -> List[int]: # seed the rng using the previous tokens/prefix self._seed_rng(input_ids) greenlist_size = int(max_value * self.gamma) vocab_permutation = torch.randperm(max_value, device=device, generator=self.rng) greenlist_ids = vocab_permutation[:greenlist_size] return greenlist_ids @staticmethod def _calc_greenlist_mask( scores: torch.FloatTensor, greenlist_token_ids ) -> torch.BoolTensor: green_tokens_mask = torch.zeros_like(scores) green_tokens_mask[-1, greenlist_token_ids] = 1 final_mask = green_tokens_mask.bool() return final_mask @staticmethod def _bias_greenlist_logits( scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float ) -> torch.Tensor: scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias return scores def __call__( self, input_ids: Union[List[int], torch.LongTensor], scores: torch.FloatTensor ) -> torch.FloatTensor: greenlist_ids = self._get_greenlist_ids( input_ids, scores.shape[-1], scores.device ) green_tokens_mask = self._calc_greenlist_mask( scores=scores, greenlist_token_ids=greenlist_ids ) scores = self._bias_greenlist_logits( scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta ) return scores
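The logits processor above implements the Kirchenbauer et al. scheme: seed an RNG from the previous token id, declare a gamma-fraction of the vocabulary "green", and add delta to those logits. A stripped-down sketch with toy numbers (this is an illustration, not the class above):

```python
import torch

hash_key, gamma, delta, vocab_size = 15485863, 0.5, 2.0, 10
prev_token = 42

rng = torch.Generator(device="cpu")
rng.manual_seed(hash_key * prev_token)  # same previous token -> same greenlist
greenlist = torch.randperm(vocab_size, generator=rng)[: int(vocab_size * gamma)]

scores = torch.zeros(1, vocab_size)
mask = torch.zeros_like(scores, dtype=torch.bool)
mask[-1, greenlist] = True
scores[mask] += delta  # bias generation toward the green tokens

print(greenlist.tolist())
print(scores)
```

Because the greenlist is a deterministic function of the previous token, a detector that knows `hash_key` can recompute it and count how many generated tokens were green.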
text-generation-inference/server/text_generation_server/utils/watermark.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/watermark.py", "repo_id": "text-generation-inference", "token_count": 1489 }
208
extern crate napi_build; fn main() { napi_build::setup(); }
tokenizers/bindings/node/build.rs/0
{ "file_path": "tokenizers/bindings/node/build.rs", "repo_id": "tokenizers", "token_count": 26 }
209
// import { promisify } from 'util' import { BPE, Tokenizer, mergeEncodings, slice } from '../../' describe('slice', () => { const text = 'My name is John 👋' const sliceText = slice.bind({}, text) it('returns the full text when no params', () => { const sliced = sliceText() expect(sliced).toEqual(text) }) it('accepts `undefined` as second parameter', () => { const original = sliceText(undefined) expect(original).toEqual(text) }) it('accepts `undefined` as third parameter', () => { const original = sliceText(0, undefined) expect(original).toEqual(text) }) it('throws an error when `begin` is out of range', () => { expect(() => sliceText(1000)).toThrow() }) it('returns slice starting at the specified index', () => { const original = sliceText(3) expect(original).toEqual('name is John 👋') }) it('throws an error when `end` is out of range', () => { expect(() => sliceText(0, 1000)).toThrow() }) it('returns the text between the two specified indexes', () => { const original = sliceText(3, 7) expect(original).toEqual('name') }) describe('with only a negative `begin`', () => { it('returns the original string counting from the end when in the range', () => { const original = sliceText(-1) expect(original).toEqual('👋') }) it('throws an error when out of range', () => { expect(() => sliceText(-1000)).toThrow() }) }) describe('with a positive `begin` and a negative `end`', () => { it('returns correct slice when resulting range is valid', () => { const original = sliceText(3, -7) expect(original).toEqual('name is') }) it('throws an error when resulting `end` index is lower than `begin`', () => { expect(() => sliceText(7, -12)).toThrow() }) it('throws an error when `begin` is out of range', () => { expect(() => sliceText(1000, -12)).toThrow() }) it('throws an error when resulting `end` index is out of range', () => { expect(() => sliceText(7, -1000)).toThrow() }) }) describe('with a negative `begin` and a positive `end`', () => { it('returns correct slice when resulting range is valid', () => { const original = sliceText(-9, 10) expect(original).toEqual('is') }) it('throws an error when resulting `begin` index is upper than `end`', () => { expect(() => sliceText(-3, 5)).toThrow() }) it('throws an error when `end` is out of range', () => { expect(() => sliceText(-5, 1000)).toThrow() }) it('throws an error when resulting `begin` index is out of range', () => { expect(() => sliceText(-1000, 10)).toThrow() }) }) describe('with negatives `begin` and `end`', () => { it('returns correct slice when resulting range is valid', () => { const original = sliceText(-9, -7) expect(original).toEqual('is') }) it('throws an error when resulting `end` index is lower than `begin`', () => { expect(() => sliceText(-5, -10)).toThrow() }) it('throws an error when resulting `begin` index is out of range', () => { expect(() => sliceText(-1000, -10)).toThrow() }) it('throws an error when resulting `end` index is out of range', () => { expect(() => sliceText(-10, -1000)).toThrow() }) }) }) describe('mergeEncodings', () => { const model = BPE.empty() const tokenizer = new Tokenizer(model) tokenizer.addTokens(['my', 'name', 'is', 'john']) it('accepts `undefined` as a second parameter', () => { const encoding = mergeEncodings([], undefined) expect(encoding.constructor.name).toEqual('Encoding') }) it('returns correct result with `growingOffsets` not provided', async () => { const firstEncoding = await tokenizer.encode('my name is', null) const secondEncoding = await tokenizer.encode('john', null) const encoding = 
mergeEncodings([firstEncoding, secondEncoding]) expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john']) expect(encoding.getOffsets()).toEqual([ [0, 2], [3, 7], [8, 10], [0, 4], ]) }) it('returns correct result when `growingOffsets` is `false`', async () => { const firstEncoding = await tokenizer.encode('my name is', null) const secondEncoding = await tokenizer.encode('john', null) const encoding = mergeEncodings([firstEncoding, secondEncoding], false) expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john']) expect(encoding.getOffsets()).toEqual([ [0, 2], [3, 7], [8, 10], [0, 4], ]) }) it('returns correct result when `growingOffsets` is `true`', async () => { const firstEncoding = await tokenizer.encode('my name is', null) const secondEncoding = await tokenizer.encode('john', null) const encoding = mergeEncodings([firstEncoding, secondEncoding], true) expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john']) expect(encoding.getOffsets()).toEqual([ [0, 2], [3, 7], [8, 10], [10, 14], ]) }) })
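The `growingOffsets` flag exercised in the tests above controls whether the second encoding keeps offsets relative to its own input (`[0, 4]` for `john`) or has them shifted past the end of the first sequence (`[10, 14]`). The shift is easy to reproduce without the bindings; the snippet below is a plain-Python illustration, not the library's merge implementation.

```python
# offsets of two independently encoded sequences (character spans in each input)
first = [(0, 2), (3, 7), (8, 10)]  # "my", "name", "is" in "my name is"
second = [(0, 4)]                  # "john" in "john"


def merge_offsets(first, second, growing_offsets: bool):
    if not growing_offsets:
        return first + second
    shift = first[-1][1]  # 10, where the first sequence's last token ends
    return first + [(start + shift, end + shift) for start, end in second]


print(merge_offsets(first, second, growing_offsets=False))  # ..., (0, 4)
print(merge_offsets(first, second, growing_offsets=True))   # ..., (10, 14)
```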
tokenizers/bindings/node/lib/bindings/utils.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/utils.test.ts", "repo_id": "tokenizers", "token_count": 1866 }
210
{ "name": "tokenizers-linux-arm64-musl", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "arm64" ], "main": "tokenizers.linux-arm64-musl.node", "files": [ "tokenizers.linux-arm64-musl.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers", "libc": [ "musl" ] }
tokenizers/bindings/node/npm/linux-arm64-musl/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm64-musl/package.json", "repo_id": "tokenizers", "token_count": 291 }
211
#![deny(clippy::all)] pub const VERSION: &str = env!("CARGO_PKG_VERSION"); mod arc_rwlock_serde; pub mod decoders; pub mod encoding; pub mod models; pub mod normalizers; pub mod pre_tokenizers; pub mod processors; pub mod tasks; pub mod tokenizer; pub mod trainers; pub mod utils;
tokenizers/bindings/node/src/lib.rs/0
{ "file_path": "tokenizers/bindings/node/src/lib.rs", "repo_id": "tokenizers", "token_count": 102 }
212
# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.13.2] - [#1096] Python 3.11 support ## [0.13.1] - [#1072] Fixing Roberta type ids. ## [0.13.0] - [#956] PyO3 version upgrade - [#1055] M1 automated builds - [#1008] `Decoder` is now a composable trait, but without being backward incompatible - [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible Both trait changes warrant a "major" number since, despite best efforts to not break backward compatibility, the code is different enough that we cannot be exactly sure. ## [0.12.1] - [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520 ## [0.12.0] YANKED Bump minor version because of a breaking change. - [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free. - [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience) - [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens) - [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking. - [#962] Fix tests for python 3.10 - [#961] Added link for Ruby port of `tokenizers` ## [0.11.6] - [#919] Fixing single_word AddedToken. (regression from 0.11.2) - [#916] Deserializing faster `added_tokens` by loading them in batch. ## [0.11.5] - [#895] Build `python 3.10` wheels. ## [0.11.4] - [#884] Fixing bad deserialization following inclusion of a default for Punctuation ## [0.11.3] - [#882] Fixing Punctuation deserialize without argument. - [#868] Fixing missing direction in TruncationParams - [#860] Adding TruncationSide to TruncationParams ## [0.11.0] ### Fixed - [#585] Conda version should now work on old CentOS - [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`. - [#851] Doc links ### Added - [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor - [#845]: Documentation for `Decoders`. 
### Changed
- [#850]: Added a feature gate to enable disabling `http` features
- [#718]: Fix `WordLevel` tokenizer determinism during training
- [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer`
- [#770]: Improved documentation for `UnigramTrainer`
- [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub
- [#793]: Saving a pretty JSON file by default when saving a tokenizer

## [0.10.3]

### Fixed
- [#686]: Fix SPM conversion process for whitespace deduplication
- [#707]: Fix stripping strings containing Unicode characters

### Added
- [#693]: Add a CTC Decoder for Wave2Vec models

### Removed
- [#714]: Removed support for Python 3.5

## [0.10.2]

### Fixed
- [#652]: Fix offsets for `Precompiled` corner case
- [#656]: Fix BPE `continuing_subword_prefix`
- [#674]: Fix `Metaspace` serialization problems

## [0.10.1]

### Fixed
- [#616]: Fix SentencePiece tokenizers conversion
- [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM)
- [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut`
- [#620]: Fix serialization/deserialization for overlapping models
- [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`)

## [0.10.0]

### Added
- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
- [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
- [#533]: Add support for conda builds
- [#542]: Add Split pre-tokenizer to easily split using a pattern
- [#544]: Ability to train from memory. This also improves the integration with `datasets`
- [#590]: Add getters/setters for components on BaseTokenizer
- [#574]: Add `fuse_unk` option to SentencePieceBPETokenizer

### Changed
- [#509]: Automatically stubbing the `.pyi` files
- [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()`
- [#530]: The various attributes on each component can be get/set (i.e. `tokenizer.model.dropout = 0.1`)
- [#538]: The API Reference has been improved and is now up-to-date.

### Fixed
- [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that forced reloading the `Model` after training.
- [#539]: Fix `BaseTokenizer` enable_truncation docstring

## [0.9.4]

### Fixed
- [#492]: Fix `from_file` on `BertWordPieceTokenizer`
- [#498]: Fix the link to download `sentencepiece_model_pb2.py`
- [#500]: Fix a typo in the docs quicktour

### Changed
- [#506]: Improve Encoding mappings for pairs of sequences

## [0.9.3]

### Fixed
- [#470]: Fix hanging error when training with custom component
- [#476]: TemplateProcessing serialization is now deterministic
- [#481]: Fix SentencePieceBPETokenizer.from_files

### Added
- [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts
- [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly

## [0.9.2]

### Fixed
- [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing

## [0.9.1]

### Fixed
- [#459]: Fix a problem with deserialization

## [0.9.0]

### Fixed
- [#362]: Fix training deadlock with Python components.
- [#363]: Fix a crash when calling `.train` with some non-existent files
- [#355]: Remove a lot of possible crashes
- [#389]: Improve truncation (crash and consistency)

### Added
- [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays
- [#292]: Support for the Unigram algorithm
- [#378], [#394], [#416], [#417]: Many new Normalizer and PreTokenizer
- [#403]: Add `TemplateProcessing` `PostProcessor`.
- [#420]: Ability to fuse the "unk" token in BPE.

### Changed
- [#360]: Lots of improvements related to words/alignment tracking
- [#426]: Improvements on error messages thanks to PyO3 0.12

## [0.8.1]

### Fixed
- [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly

### Changed
- [#329]: Improved warning and behavior when we detect a fork
- [#330]: BertNormalizer now keeps the same behavior as the original implementation when `strip_accents` is not specified.

## [0.8.0]

### Highlights of this release
- We can now encode both pre-tokenized inputs and raw strings. This is especially useful when processing datasets that are already pre-tokenized, like for NER (Named Entity Recognition), and helps while applying labels to each word.
- Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later load it back with just one line of code. That's what sharing a Tokenizer means now: 1 line of code.
- With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components, Encodings, everything can be pickled!
- Training a tokenizer is now even faster (up to 5-10x) than before!
- Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library makes heavy use of the multithreading capabilities of our computers to allow very fast tokenization, this led to problems (deadlocks) when used with `multiprocessing`. This version now allows disabling the parallelism, and will warn you if this is necessary.
- And a lot of other improvements, and fixes.

### Fixed
- [#286]: Fix various crashes when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens

### Added
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...). This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`).
- [#273]: `Tokenizer` and its parts are now picklable
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with `enable_padding(pad_to_multiple_of=8)` for example.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment variable. This is especially useful when using `multiprocessing` capabilities, with the `fork` start method, which happens to be the default on Linux systems. Without disabling the parallelism, the process dead-locks while encoding. (Cf [#187] for more information)

### Changed
- Improved errors generated during truncation: the case where the provided max length is too low is now handled properly.
- [#249] `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized, the argument `is_pretokenized=True` must be specified.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file - [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 - [#309]: Improved the management of the additional vocabulary. This introduces an option `normalized`, controlling whether a token should be extracted from the normalized version of the input text. ## [0.7.0] ### Changed - Only one progress bar while reading files during training. This is better for use-cases with a high number of files as it avoids having too many progress bars on screen. Also avoids reading the size of each file before starting to actually read these files, as this process could take really long. - [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we should add the special tokens. This is activated by default. - [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by `encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint. - [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore. - The added token given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using `train(special_tokens=...)` can now be instances of `AddedToken` to provide more control over these tokens. - [#136]: Updated Pyo3 version - [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using constructors. - [#239]: `CharBPETokenizer` now corresponds to OpenAI GPT BPE implementation by default. ### Added - [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token. It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`). - [#236]: `RobertaProcessing` also handles trimming the offsets. - [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char` or `word` (input space) and `token` (output space). - `post_process` can be called on the `Tokenizer` - [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with `get_vocab(with_added_tokens: bool)` - [#136] Models can now be instantiated through object constructors. ### Fixed - [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE: - when `add_prefix_space=True` - [#156]: when a Unicode character gets split-up in multiple byte-level characters - Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded. - [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not advised, but that's not the question). - [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer` ### How to migrate - Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`). - `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or `encode_batch` - Access to the `original_str` on the `Encoding` has been removed. The original string is the input of `encode` so it didn't make sense to keep it here. - No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. 
They are now relative to the original string by default. - Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling `normalize(sequence)` on the `Tokenizer` - Change `Model.from_files` and `Model.empty` to use constructor. The model constructor should take the same arguments as the old methods. (ie `BPE(vocab, merges)` or `BPE()`) - If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set `bert_normalizer=False` and `split_on_whitespace_only=True`. ## [0.6.0] ### Changed - [#165]: Big improvements in speed for BPE (Both training and tokenization) ### Fixed - [#160]: Some default tokens were missing from `BertWordPieceTokenizer` - [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got split up in multiple bytes. - [#174]: The `longest_first` truncation strategy had a bug ## [0.5.2] - [#163]: Do not open all files directly while training ### Fixed - We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file was named `vocab.json`. This is now fixed. - The `WordLevel` model was also saving its vocabulary to the wrong format. ## [0.5.1] ### Changed - `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not specified, the files get a more generic naming, like `vocab.json` or `merges.txt`. ## [0.5.0] ### Changed - [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding - [#149]: `ByteLevelBPETokenizer` now has `dropout`. - `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers. (Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`) - [#139]: Expose `__len__` on `Encoding` - Improved padding performances. ### Added - Added a new `Strip` normalizer ### Fixed - [#145]: Decoding was buggy on `BertWordPieceTokenizer`. - [#152]: Some documentation and examples were still using the old `BPETokenizer` ### How to migrate - Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of `do_lowercase`. ## [0.4.2] ### Fixed - [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from being trained. ## [0.4.1] ### Fixed - [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer ## [0.4.0] ### Changed - [#131]: Replaced all .new() class methods by a proper __new__ implementation - Improved typings ### How to migrate - Remove all `.new` on all classe instanciations ## [0.3.0] ### Changed - BPETokenizer has been renamed to CharBPETokenizer for clarity. - Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language model, just as the main `Encoding`. - Provide mapping to the original string offsets using: ``` output = tokenizer.encode(...) print(output.original_str.offsets(output.offsets[3])) ``` - [#99]: Exposed the vocabulary size on all tokenizers ### Added - Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given delimiter (Works like `.split(delimiter)`) - Added `WordLevel`: a new model that simply maps `tokens` to their `ids`. ### Fixed - Fix a bug with IndexableString - Fix a bug with truncation ### How to migrate - Rename `BPETokenizer` to `CharBPETokenizer` - `Encoding.overflowing` is now a List instead of a `Optional[Encoding]` ## [0.2.1] ### Fixed - Fix a bug with the IDs associated with added tokens. 
- Fix a bug that was causing crashes in Python 3.5 [#1096]: https://github.com/huggingface/tokenizers/pull/1096 [#1072]: https://github.com/huggingface/tokenizers/pull/1072 [#956]: https://github.com/huggingface/tokenizers/pull/956 [#1008]: https://github.com/huggingface/tokenizers/pull/1008 [#1009]: https://github.com/huggingface/tokenizers/pull/1009 [#1047]: https://github.com/huggingface/tokenizers/pull/1047 [#1055]: https://github.com/huggingface/tokenizers/pull/1055 [#1051]: https://github.com/huggingface/tokenizers/pull/1051 [#1052]: https://github.com/huggingface/tokenizers/pull/1052 [#938]: https://github.com/huggingface/tokenizers/pull/938 [#939]: https://github.com/huggingface/tokenizers/pull/939 [#952]: https://github.com/huggingface/tokenizers/pull/952 [#954]: https://github.com/huggingface/tokenizers/pull/954 [#962]: https://github.com/huggingface/tokenizers/pull/962 [#961]: https://github.com/huggingface/tokenizers/pull/961 [#960]: https://github.com/huggingface/tokenizers/pull/960 [#919]: https://github.com/huggingface/tokenizers/pull/919 [#916]: https://github.com/huggingface/tokenizers/pull/916 [#895]: https://github.com/huggingface/tokenizers/pull/895 [#884]: https://github.com/huggingface/tokenizers/pull/884 [#882]: https://github.com/huggingface/tokenizers/pull/882 [#868]: https://github.com/huggingface/tokenizers/pull/868 [#860]: https://github.com/huggingface/tokenizers/pull/860 [#850]: https://github.com/huggingface/tokenizers/pull/850 [#844]: https://github.com/huggingface/tokenizers/pull/844 [#845]: https://github.com/huggingface/tokenizers/pull/845 [#851]: https://github.com/huggingface/tokenizers/pull/851 [#585]: https://github.com/huggingface/tokenizers/pull/585 [#793]: https://github.com/huggingface/tokenizers/pull/793 [#780]: https://github.com/huggingface/tokenizers/pull/780 [#770]: https://github.com/huggingface/tokenizers/pull/770 [#762]: https://github.com/huggingface/tokenizers/pull/762 [#718]: https://github.com/huggingface/tokenizers/pull/718 [#714]: https://github.com/huggingface/tokenizers/pull/714 [#707]: https://github.com/huggingface/tokenizers/pull/707 [#693]: https://github.com/huggingface/tokenizers/pull/693 [#686]: https://github.com/huggingface/tokenizers/pull/686 [#674]: https://github.com/huggingface/tokenizers/pull/674 [#657]: https://github.com/huggingface/tokenizers/pull/657 [#656]: https://github.com/huggingface/tokenizers/pull/656 [#652]: https://github.com/huggingface/tokenizers/pull/652 [#621]: https://github.com/huggingface/tokenizers/pull/621 [#620]: https://github.com/huggingface/tokenizers/pull/620 [#618]: https://github.com/huggingface/tokenizers/pull/618 [#617]: https://github.com/huggingface/tokenizers/pull/617 [#616]: https://github.com/huggingface/tokenizers/pull/616 [#590]: https://github.com/huggingface/tokenizers/pull/590 [#574]: https://github.com/huggingface/tokenizers/pull/574 [#544]: https://github.com/huggingface/tokenizers/pull/544 [#542]: https://github.com/huggingface/tokenizers/pull/542 [#539]: https://github.com/huggingface/tokenizers/pull/539 [#538]: https://github.com/huggingface/tokenizers/pull/538 [#533]: https://github.com/huggingface/tokenizers/pull/533 [#530]: https://github.com/huggingface/tokenizers/pull/530 [#519]: https://github.com/huggingface/tokenizers/pull/519 [#509]: https://github.com/huggingface/tokenizers/pull/509 [#508]: https://github.com/huggingface/tokenizers/pull/508 [#506]: https://github.com/huggingface/tokenizers/pull/506 [#500]: https://github.com/huggingface/tokenizers/pull/500 
[#498]: https://github.com/huggingface/tokenizers/pull/498 [#492]: https://github.com/huggingface/tokenizers/pull/492 [#481]: https://github.com/huggingface/tokenizers/pull/481 [#480]: https://github.com/huggingface/tokenizers/pull/480 [#477]: https://github.com/huggingface/tokenizers/pull/477 [#476]: https://github.com/huggingface/tokenizers/pull/476 [#470]: https://github.com/huggingface/tokenizers/pull/470 [#464]: https://github.com/huggingface/tokenizers/pull/464 [#459]: https://github.com/huggingface/tokenizers/pull/459 [#420]: https://github.com/huggingface/tokenizers/pull/420 [#417]: https://github.com/huggingface/tokenizers/pull/417 [#416]: https://github.com/huggingface/tokenizers/pull/416 [#403]: https://github.com/huggingface/tokenizers/pull/403 [#394]: https://github.com/huggingface/tokenizers/pull/394 [#389]: https://github.com/huggingface/tokenizers/pull/389 [#379]: https://github.com/huggingface/tokenizers/pull/379 [#378]: https://github.com/huggingface/tokenizers/pull/378 [#363]: https://github.com/huggingface/tokenizers/pull/363 [#362]: https://github.com/huggingface/tokenizers/pull/362 [#360]: https://github.com/huggingface/tokenizers/pull/360 [#355]: https://github.com/huggingface/tokenizers/pull/355 [#333]: https://github.com/huggingface/tokenizers/pull/333 [#330]: https://github.com/huggingface/tokenizers/pull/330 [#329]: https://github.com/huggingface/tokenizers/pull/329 [#311]: https://github.com/huggingface/tokenizers/pull/311 [#309]: https://github.com/huggingface/tokenizers/pull/309 [#292]: https://github.com/huggingface/tokenizers/pull/292 [#289]: https://github.com/huggingface/tokenizers/pull/289 [#286]: https://github.com/huggingface/tokenizers/pull/286 [#280]: https://github.com/huggingface/tokenizers/pull/280 [#276]: https://github.com/huggingface/tokenizers/pull/276 [#273]: https://github.com/huggingface/tokenizers/pull/273 [#272]: https://github.com/huggingface/tokenizers/pull/272 [#249]: https://github.com/huggingface/tokenizers/pull/249 [#239]: https://github.com/huggingface/tokenizers/pull/239 [#236]: https://github.com/huggingface/tokenizers/pull/236 [#234]: https://github.com/huggingface/tokenizers/pull/234 [#208]: https://github.com/huggingface/tokenizers/pull/208 [#205]: https://github.com/huggingface/tokenizers/issues/205 [#197]: https://github.com/huggingface/tokenizers/pull/197 [#193]: https://github.com/huggingface/tokenizers/pull/193 [#190]: https://github.com/huggingface/tokenizers/pull/190 [#188]: https://github.com/huggingface/tokenizers/pull/188 [#187]: https://github.com/huggingface/tokenizers/issues/187 [#175]: https://github.com/huggingface/tokenizers/issues/175 [#174]: https://github.com/huggingface/tokenizers/issues/174 [#165]: https://github.com/huggingface/tokenizers/pull/165 [#163]: https://github.com/huggingface/tokenizers/issues/163 [#160]: https://github.com/huggingface/tokenizers/issues/160 [#156]: https://github.com/huggingface/tokenizers/pull/156 [#152]: https://github.com/huggingface/tokenizers/issues/152 [#149]: https://github.com/huggingface/tokenizers/issues/149 [#145]: https://github.com/huggingface/tokenizers/issues/145 [#139]: https://github.com/huggingface/tokenizers/issues/139 [#137]: https://github.com/huggingface/tokenizers/issues/137 [#134]: https://github.com/huggingface/tokenizers/issues/134 [#131]: https://github.com/huggingface/tokenizers/issues/131 [#99]: https://github.com/huggingface/tokenizers/pull/99
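Several entries above ([#272]/[#273] serialization, [#249] pre-tokenized input, [#289] `pad_to_multiple_of`, [#311] `TOKENIZERS_PARALLELISM`) are easiest to see in a short script. This is a rough sketch against the Python API; the exact padding defaults and the file name are illustrative and may vary between versions.

```python
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"  # [#311]: disable fork-time parallelism warnings

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace

tok = Tokenizer(BPE())
tok.pre_tokenizer = Whitespace()
tok.add_tokens(["my", "name", "is", "john"])

# [#289]: pad encodings up to a multiple of 8 (helps tensor cores)
tok.enable_padding(pad_to_multiple_of=8)

# [#249]: inputs are already split into words, so flag them as pre-tokenized
encodings = tok.encode_batch(
    [["my", "name", "is", "john"], ["my", "name"]], is_pretokenized=True
)
print([len(e.ids) for e in encodings])  # both rows padded to the same length, rounded up to 8

# [#272]/[#273]: the whole tokenizer serializes to a single JSON file
tok.save("tokenizer.json")
restored = Tokenizer.from_file("tokenizer.json")
```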
tokenizers/bindings/python/CHANGELOG.md/0
{ "file_path": "tokenizers/bindings/python/CHANGELOG.md", "repo_id": "tokenizers", "token_count": 7408 }
213
from .base_tokenizer import BaseTokenizer from .bert_wordpiece import BertWordPieceTokenizer from .byte_level_bpe import ByteLevelBPETokenizer from .char_level_bpe import CharBPETokenizer from .sentencepiece_bpe import SentencePieceBPETokenizer from .sentencepiece_unigram import SentencePieceUnigramTokenizer
tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py", "repo_id": "tokenizers", "token_count": 94 }
214
.tokenized-text { width:100%; padding:2rem; max-height: 400px; overflow-y: auto; box-sizing:border-box; line-height:4rem; /* Lots of space between lines */ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; box-shadow: 2px 2px 2px rgba(0,0,0,0.2); background-color: rgba(0,0,0,0.01); letter-spacing:2px; /* Give some extra separation between chars */ } .non-token{ /* White space and other things the tokenizer ignores*/ white-space: pre; letter-spacing:4px; border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/ border-bottom:1px solid #A0A0A0; line-height: 1rem; height: calc(100% - 2px); } .token { white-space: pre; position:relative; color:black; letter-spacing:2px; } .annotation{ white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */ border-radius:4px; position:relative; width:fit-content; } .annotation:before { /*The before holds the text and the after holds the background*/ z-index:1000; /* Make sure this is above the background */ content:attr(data-label); /* The annotations label is on a data attribute */ color:white; position:absolute; font-size:1rem; text-align:center; font-weight:bold; top:1.75rem; line-height:0; left:0; width:100%; padding:0.5rem 0; /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ overflow: hidden; white-space: nowrap; text-overflow:ellipsis; } .annotation:after { content:attr(data-label); /* The content defines the width of the annotation*/ position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; /* Nast hack below: We set the annotations color in code because we don't know the colors at css time. But you can't pass a color as a data attribute to get it into the pseudo element (this thing) So to get around that, annotations have the color set on them with a style attribute and then we can get the color with currentColor. 
Annotations wrap tokens and tokens set the color back to black */ background-color: currentColor; } .annotation:hover::after, .annotation:hover::before{ /* When the user hovers over an annotation expand the label to display in full */ min-width: fit-content; } .annotation:hover{ /* Emphasize the annotation start end with a border on hover*/ border-color: currentColor; border: 2px solid; } .special-token:not(:empty){ /* A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) */ position:relative; } .special-token:empty::before{ /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ content:attr(data-stok); background:#202020; font-size:0.75rem; color:white; margin: 0 0.25rem; padding: 0.25rem; border-radius:4px } .special-token:not(:empty):before { /* Special tokens that have text (UNK) are displayed above the actual text*/ content:attr(data-stok); position:absolute; bottom:1.75rem; min-width:100%; width:100%; height:1rem; line-height:1rem; font-size:1rem; text-align:center; color:white; font-weight:bold; background:#202020; border-radius:10%; } /* We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations instead we apply even and odd class at generation time and color them that way */ .even-token{ background:#DCDCDC ; border: 1px solid #DCDCDC; } .odd-token{ background:#A0A0A0; border: 1px solid #A0A0A0; } .even-token.multi-token,.odd-token.multi-token{ background: repeating-linear-gradient( 45deg, transparent, transparent 1px, #ccc 1px, #ccc 1px ), /* on "bottom" */ linear-gradient( to bottom, #FFB6C1, #999 ); } .multi-token:hover::after { content:"This char has more than 1 token"; /* The content defines the width of the annotation*/ color:white; background-color: black; position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; }
tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css", "repo_id": "tokenizers", "token_count": 1806 }
215
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizer::SplitDelimiterBehavior; use tk::pre_tokenizers::bert::BertPreTokenizer; use tk::pre_tokenizers::byte_level::ByteLevel; use tk::pre_tokenizers::delimiter::CharDelimiterSplit; use tk::pre_tokenizers::digits::Digits; use tk::pre_tokenizers::metaspace::{Metaspace, PrependScheme}; use tk::pre_tokenizers::punctuation::Punctuation; use tk::pre_tokenizers::split::Split; use tk::pre_tokenizers::unicode_scripts::UnicodeScripts; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::tokenizer::Offsets; use tk::{PreTokenizedString, PreTokenizer}; use tokenizers as tk; use super::error::ToPyResult; use super::utils::*; /// Base class for all pre-tokenizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// PreTokenizer will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.pre_tokenizers", name = "PreTokenizer", subclass )] #[derive(Clone, Serialize, Deserialize)] pub struct PyPreTokenizer { #[serde(flatten)] pub(crate) pretok: PyPreTokenizerTypeWrapper, } impl PyPreTokenizer { #[allow(dead_code)] pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self { PyPreTokenizer { pretok } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.pretok { PyPreTokenizerTypeWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PyPreTokenizerTypeWrapper::Single(ref inner) => { match &*inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyPreTokenizerWrapper::Wrapped(inner) => match inner { PreTokenizerWrapper::Whitespace(_) => { Py::new(py, (PyWhitespace {}, base))?.into_py(py) } PreTokenizerWrapper::Split(_) => { Py::new(py, (PySplit {}, base))?.into_py(py) } PreTokenizerWrapper::Punctuation(_) => { Py::new(py, (PyPunctuation {}, base))?.into_py(py) } PreTokenizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PreTokenizerWrapper::Metaspace(_) => { Py::new(py, (PyMetaspace {}, base))?.into_py(py) } PreTokenizerWrapper::Delimiter(_) => { Py::new(py, (PyCharDelimiterSplit {}, base))?.into_py(py) } PreTokenizerWrapper::WhitespaceSplit(_) => { Py::new(py, (PyWhitespaceSplit {}, base))?.into_py(py) } PreTokenizerWrapper::ByteLevel(_) => { Py::new(py, (PyByteLevel {}, base))?.into_py(py) } PreTokenizerWrapper::BertPreTokenizer(_) => { Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py) } PreTokenizerWrapper::Digits(_) => { Py::new(py, (PyDigits {}, base))?.into_py(py) } PreTokenizerWrapper::UnicodeScripts(_) => { Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py) } }, } } }) } } impl PreTokenizer for PyPreTokenizer { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> { self.pretok.pre_tokenize(normalized) } } #[pymethods] impl PyPreTokenizer { #[staticmethod] fn custom(pretok: PyObject) -> Self { PyPreTokenizer { pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.pretok).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PreTokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) 
} fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PreTokenizer: {}", e )) })?; self.pretok = unpickled; Ok(()) } Err(e) => Err(e), } } /// Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.PreTokenizedString` to /// keep track of the pre-tokenization, and leverage the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of /// the pre-tokenization of a raw string, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` /// /// Args: /// pretok (:class:`~tokenizers.PreTokenizedString): /// The pre-tokenized string on which to apply this /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` #[pyo3(text_signature = "(self, pretok)")] fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> { ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into() } /// Pre tokenize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the /// alignment, nor does it provide all the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` /// /// Args: /// sequence (:obj:`str`): /// A string to pre-tokeize /// /// Returns: /// :obj:`List[Tuple[str, Offsets]]`: /// A list of tuple with the pre-tokenized parts and their offsets #[pyo3(text_signature = "(self, sequence)")] fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> { let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s); ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?; Ok(pretokenized .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char) .into_iter() .map(|(s, o, _)| (s.to_owned(), o)) .collect()) } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) = *single.read().unwrap() { pretok.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name($value); } } }}; } /// ByteLevel PreTokenizer /// /// This pre-tokenizer takes care of replacing all bytes of the given string /// with a corresponding representation, as well as splitting into words. /// /// Args: /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
/// use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Set this to :obj:`False` to prevent this `pre_tokenizer` from using /// the GPT2 specific regexp for spliting on whitespace. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, ByteLevel, add_prefix_space, add_prefix_space); } #[getter] fn get_use_regex(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, use_regex) } #[setter] fn set_use_regex(self_: PyRef<Self>, use_regex: bool) { setter!(self_, ByteLevel, use_regex, use_regex); } #[new] #[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs), text_signature = "(self, add_prefix_space=True, use_regex=True)")] fn new( add_prefix_space: bool, use_regex: bool, _kwargs: Option<&PyDict>, ) -> (Self, PyPreTokenizer) { ( PyByteLevel {}, ByteLevel::default() .add_prefix_space(add_prefix_space) .use_regex(use_regex) .into(), ) } /// Returns the alphabet used by this PreTokenizer. /// /// Since the ByteLevel works as its name suggests, at the byte level, it /// encodes each byte value to a unique visible character. This means that there is a /// total of 256 different characters composing this alphabet. /// /// Returns: /// :obj:`List[str]`: A list of characters that compose the alphabet #[staticmethod] #[pyo3(text_signature = "()")] fn alphabet() -> Vec<String> { ByteLevel::alphabet() .into_iter() .map(|c| c.to_string()) .collect() } } /// This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")] pub struct PyWhitespace {} #[pymethods] impl PyWhitespace { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespace {}, Whitespace {}.into()) } } /// This pre-tokenizer simply splits on the whitespace. Works like `.split()` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")] pub struct PyWhitespaceSplit {} #[pymethods] impl PyWhitespaceSplit { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespaceSplit {}, WhitespaceSplit.into()) } } /// Split PreTokenizer /// /// This versatile pre-tokenizer splits using the provided pattern and /// according to the provided behavior. The pattern can be inverted by /// making use of the invert flag. /// /// Args: /// pattern (:obj:`str` or :class:`~tokenizers.Regex`): /// A pattern used to split the string. Usually a string or a a regex built with `tokenizers.Regex` /// /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// invert (:obj:`bool`, `optional`, defaults to :obj:`False`): /// Whether to invert the pattern. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")]
pub struct PySplit {}
#[pymethods]
impl PySplit {
    #[new]
    #[pyo3(signature = (pattern, behavior, invert = false), text_signature = "(self, pattern, behavior, invert=False)")]
    fn new(
        pattern: PyPattern,
        behavior: PySplitDelimiterBehavior,
        invert: bool,
    ) -> PyResult<(Self, PyPreTokenizer)> {
        Ok((
            PySplit {},
            ToPyResult(Split::new(pattern, behavior.into(), invert))
                .into_py()?
                .into(),
        ))
    }

    fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple {
        PyTuple::new(py, [" ", "removed"])
    }
}

/// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
///
/// Args:
///     delimiter (:obj:`str`):
///         The delimiter char that will be used to split the input
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")]
pub struct PyCharDelimiterSplit {}
#[pymethods]
impl PyCharDelimiterSplit {
    #[getter]
    fn get_delimiter(self_: PyRef<Self>) -> String {
        getter!(self_, Delimiter, delimiter.to_string())
    }

    #[setter]
    fn set_delimiter(self_: PyRef<Self>, delimiter: PyChar) {
        setter!(self_, Delimiter, delimiter, delimiter.0);
    }

    #[new]
    #[pyo3(text_signature = None)]
    pub fn new(delimiter: PyChar) -> PyResult<(Self, PyPreTokenizer)> {
        Ok((
            PyCharDelimiterSplit {},
            CharDelimiterSplit::new(delimiter.0).into(),
        ))
    }

    fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple {
        PyTuple::new(py, [" "])
    }
}

/// BertPreTokenizer
///
/// This pre-tokenizer splits tokens on spaces, and also on punctuation.
/// Each occurrence of a punctuation character will be treated separately.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")]
pub struct PyBertPreTokenizer {}
#[pymethods]
impl PyBertPreTokenizer {
    #[new]
    #[pyo3(text_signature = "(self)")]
    fn new() -> (Self, PyPreTokenizer) {
        (PyBertPreTokenizer {}, BertPreTokenizer.into())
    }
}

/// This pre-tokenizer simply splits on punctuation as individual characters.
///
/// Args:
///     behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
///         The behavior to use when splitting.
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", /// "contiguous" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")] pub struct PyPunctuation {} #[pymethods] impl PyPunctuation { #[new] #[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)), text_signature = "(self, behavior=\"isolated\")")] fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) { (PyPunctuation {}, Punctuation::new(behavior.into()).into()) } } /// This pre-tokenizer composes other pre_tokenizers and applies them in sequence #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = "(self, pretokenizers)")] fn new(pre_tokenizers: &PyList) -> PyResult<(Self, PyPreTokenizer)> { let mut sequence = Vec::with_capacity(pre_tokenizers.len()); for n in pre_tokenizers.iter() { let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?; match &pretokenizer.pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => { sequence.extend(inner.iter().cloned()) } PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } fn from_string(string: String) -> Result<PrependScheme, PyErr> { let scheme = match string.as_str() { "first" => PrependScheme::First, "never" => PrependScheme::Never, "always" => PrependScheme::Always, _ => { return Err(exceptions::PyValueError::new_err(format!( "{} is an unknown variant, should be one of ['first', 'never', 'always']", string ))); } }; Ok(scheme) } /// Metaspace pre-tokenizer /// /// This pre-tokenizer replaces any whitespace by the provided replacement character. /// It then tries to split on these spaces. /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")] pub struct PyMetaspace {} #[pymethods] impl PyMetaspace { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: PyChar) { setter!(self_, Metaspace, @set_replacement, replacement.0); } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Metaspace, add_prefix_space, add_prefix_space); } #[getter] fn get_prepend_scheme(self_: PyRef<Self>) -> String { // Assuming Metaspace has a method to get the prepend_scheme as a string let scheme: PrependScheme = getter!(self_, Metaspace, get_prepend_scheme()); match scheme { PrependScheme::First => "first", PrependScheme::Never => "never", PrependScheme::Always => "always", } .to_string() } #[setter] fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> { let scheme = from_string(prepend_scheme)?; setter!(self_, Metaspace, @set_prepend_scheme, scheme); Ok(()) } #[new] #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true, prepend_scheme=None, **_kwargs), text_signature = "(self, replacement=\"_\", add_prefix_space=True)")] fn new( replacement: PyChar, add_prefix_space: bool, prepend_scheme: Option<String>, _kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyPreTokenizer)> { // Create a new Metaspace instance let mut new_instance: Metaspace = Metaspace::new(replacement.0, add_prefix_space); // If a prepend scheme is provided, set it if let Some(prepend_scheme) = prepend_scheme { match from_string(prepend_scheme) { Ok(prepend_scheme_enum) => new_instance.set_prepend_scheme(prepend_scheme_enum), Err(err) => return Err(err), } } Ok((PyMetaspace {}, new_instance.into())) } } /// This pre-tokenizer simply splits using the digits in separate tokens /// /// Args: /// individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): /// If set to True, digits will each be separated as follows:: /// /// "Call 123 please" -> "Call ", "1", "2", "3", " please" /// /// If set to False, digits will grouped as follows:: /// /// "Call 123 please" -> "Call ", "123", " please" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Digits")] pub struct PyDigits {} #[pymethods] impl PyDigits { #[getter] fn get_individual_digits(self_: PyRef<Self>) -> bool { getter!(self_, Digits, individual_digits) } #[setter] fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) { setter!(self_, Digits, individual_digits, individual_digits); } #[new] #[pyo3(signature = (individual_digits = false), text_signature = "(self, individual_digits=False)")] fn new(individual_digits: bool) -> (Self, PyPreTokenizer) { (PyDigits {}, Digits::new(individual_digits).into()) } } /// This pre-tokenizer splits on characters that belong to different language family /// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt /// Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. /// This mimicks SentencePiece Unigram implementation. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")] pub struct PyUnicodeScripts {} #[pymethods] impl PyUnicodeScripts { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyUnicodeScripts {}, UnicodeScripts::new().into()) } } #[derive(Clone)] pub(crate) struct CustomPreTokenizer { inner: PyObject, } impl CustomPreTokenizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::PreTokenizer for CustomPreTokenizer { fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> { Python::with_gil(|py| { let pretok = PyPreTokenizedStringRefMut::new(sentence); let py_pretok = self.inner.as_ref(py); py_pretok.call_method("pre_tokenize", (pretok.get(),), None)?; Ok(()) }) } } impl Serialize for CustomPreTokenizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PreTokenizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomPreTokenizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom PreTokenizer cannot be deserialized", )) } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerWrapper { Custom(CustomPreTokenizer), Wrapped(PreTokenizerWrapper), } impl Serialize for PyPreTokenizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>), Single(Arc<RwLock<PyPreTokenizerWrapper>>), } impl Serialize for PyPreTokenizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPreTokenizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("pretokenizers", seq)?; ser.end() } PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPreTokenizerWrapper where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerWrapper::Wrapped(pretok.into()) } } impl<I> From<I> for PyPreTokenizerTypeWrapper where I: Into<PyPreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok.into()))) } } impl<I> From<I> for PyPreTokenizer where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizer { pretok: pretok.into().into(), } } } impl PreTokenizer for PyPreTokenizerTypeWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerTypeWrapper::Single(inner) => inner.read().unwrap().pre_tokenize(pretok), PyPreTokenizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().pre_tokenize(pretok)), } } } impl PreTokenizer for PyPreTokenizerWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok), PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok), } } } /// PreTokenizers Module #[pymodule] pub fn pre_tokenizers(_py: Python, m: &PyModule) -> 
PyResult<()> { m.add_class::<PyPreTokenizer>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyWhitespace>()?; m.add_class::<PyWhitespaceSplit>()?; m.add_class::<PySplit>()?; m.add_class::<PyBertPreTokenizer>()?; m.add_class::<PyMetaspace>()?; m.add_class::<PyCharDelimiterSplit>()?; m.add_class::<PyPunctuation>()?; m.add_class::<PySequence>()?; m.add_class::<PyDigits>()?; m.add_class::<PyUnicodeScripts>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::pre_tokenizers::sequence::Sequence; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use crate::pre_tokenizers::{ CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper, }; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyPreTokenizer::new(Whitespace {}.into()); let py_wsp = py_norm.get_as_subtype(py).unwrap(); assert_eq!("Whitespace", py_wsp.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {}); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap(); match py_pretok.pretok { PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {} _ => panic!("Expected Whitespace"), }, _ => panic!("Expected wrapped, not custom."), } let py_seq: PyPreTokenizerWrapper = Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![ Whitespace {}.into(), WhitespaceSplit.into(), ])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyPreTokenizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let obj = Python::with_gil(|py| { let py_wsp = PyPreTokenizer::new(Whitespace {}.into()); let obj: PyObject = Py::new(py, py_wsp).unwrap().into_py(py); obj }); let py_seq: PyPreTokenizerWrapper = PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj)); assert!(serde_json::to_string(&py_seq).is_err()); } }
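The bindings above are easiest to see in action from the Python side. Below is a minimal usage sketch (illustrative only, not part of this source file); the sample strings and the results shown in comments are assumptions.

# Minimal usage sketch of the pre-tokenizers bound above (illustrative, not part of this file).
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import ByteLevel, Digits, Whitespace

# Inspect how a single pre-tokenizer splits a raw string into (piece, offsets) pairs.
pre_tok = Whitespace()
print(pre_tok.pre_tokenize_str("Hello there, friend!"))
# e.g. [('Hello', (0, 5)), ('there', (6, 11)), (',', (11, 12)), ...]

# Compose several pre-tokenizers; this maps to PyPreTokenizerTypeWrapper::Sequence above.
seq = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(seq.pre_tokenize_str("Call 123 please"))

# ByteLevel exposes its 256-character alphabet as a static method.
print(len(ByteLevel.alphabet()))  # 256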
tokenizers/bindings/python/src/pre_tokenizers.rs/0
{ "file_path": "tokenizers/bindings/python/src/pre_tokenizers.rs", "repo_id": "tokenizers", "token_count": 13027 }
216
import pickle import pytest from tokenizers.models import BPE, Model, WordLevel, WordPiece from ..utils import bert_files, data_dir, roberta_files class TestBPE: def test_instantiate(self, roberta_files): assert isinstance(BPE(), Model) assert isinstance(BPE(), BPE) vocab = {"a": 0, "b": 1, "ab": 2} merges = [("a", "b")] assert isinstance(BPE(vocab, merges), Model) assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=vocab) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=merges) assert isinstance( pickle.loads(pickle.dumps(BPE(vocab, merges))), BPE, ) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=roberta_files["vocab"]) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=roberta_files["merges"]) with pytest.deprecated_call(): assert isinstance( pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))), BPE, ) def test_can_modify(self): model = BPE( dropout=0.5, unk_token="[UNK]", continuing_subword_prefix="__prefix__", end_of_word_suffix="__suffix__", fuse_unk=False, ) assert model.dropout == 0.5 assert model.unk_token == "[UNK]" assert model.continuing_subword_prefix == "__prefix__" assert model.end_of_word_suffix == "__suffix__" assert model.fuse_unk == False assert model.byte_fallback == False # Modify these model.dropout = 0.1 assert pytest.approx(model.dropout) == 0.1 model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = None assert model.continuing_subword_prefix == None model.end_of_word_suffix = "suff" assert model.end_of_word_suffix == "suff" model.fuse_unk = True assert model.fuse_unk == True model.byte_fallback = True assert model.byte_fallback == True class TestWordPiece: def test_instantiate(self, bert_files): assert isinstance(WordPiece(), Model) assert isinstance(WordPiece(), WordPiece) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordPiece(vocab), Model) assert isinstance(WordPiece(vocab), WordPiece) assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(WordPiece(bert_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece) def test_can_modify(self): model = WordPiece( unk_token="<oov>", continuing_subword_prefix="__prefix__", max_input_chars_per_word=200, ) assert model.unk_token == "<oov>" assert model.continuing_subword_prefix == "__prefix__" assert model.max_input_chars_per_word == 200 # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = "$$$" assert model.continuing_subword_prefix == "$$$" model.max_input_chars_per_word = 10 assert model.max_input_chars_per_word == 10 class TestWordLevel: def test_instantiate(self, roberta_files): assert isinstance(WordLevel(), Model) assert isinstance(WordLevel(), WordLevel) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordLevel(vocab), Model) assert isinstance(WordLevel(vocab), WordLevel) assert isinstance(WordLevel.from_file(roberta_files["vocab"]), 
WordLevel) # The WordLevel model expects a vocab.json using the same format as roberta # so we can just try to load with this file with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel) def test_can_modify(self): model = WordLevel(unk_token="<oov>") assert model.unk_token == "<oov>" # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>"
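A brief sketch of how the models exercised by these tests are typically wired into a full Tokenizer; the tiny vocab and merges below are made up for illustration, and the printed values are what one would expect rather than captured output.

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace

vocab = {"a": 0, "b": 1, "ab": 2, "[UNK]": 3}
merges = [("a", "b")]

tokenizer = Tokenizer(BPE(vocab, merges, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

encoding = tokenizer.encode("ab a b c")
print(encoding.tokens)  # expected something like ['ab', 'a', 'b', '[UNK]']
print(encoding.ids)     # expected something like [2, 0, 1, 3]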
tokenizers/bindings/python/tests/bindings/test_models.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_models.py", "repo_id": "tokenizers", "token_count": 2254 }
217
import json import os import unittest import tqdm from huggingface_hub import HfApi, cached_download, hf_hub_url from tokenizers import Tokenizer from .utils import albert_base, data_dir class TestSerialization: def test_full_serialization_albert(self, albert_base): # Check we can read this file. # This used to fail because of BufReader that would fail because the # file exceeds the buffer capacity tokenizer = Tokenizer.from_file(albert_base) def check(tokenizer_file) -> bool: with open(tokenizer_file, "r") as f: data = json.load(f) if "pre_tokenizer" not in data: return True if "type" not in data["pre_tokenizer"]: return False if data["pre_tokenizer"]["type"] == "Sequence": for pre_tok in data["pre_tokenizer"]["pretokenizers"]: if "type" not in pre_tok: return False return True def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if os.getenv("RUN_SLOW") != "1": return unittest.skip("use `RUN_SLOW=1` to run")(test_case) else: return test_case @slow class TestFullDeserialization(unittest.TestCase): def test_full_deserialization_hub(self): # Check we can read this file. # This used to fail because of BufReader that would fail because the # file exceeds the buffer capacity api = HfApi() not_loadable = [] invalid_pre_tokenizer = [] # models = api.list_models(filter="transformers") # for model in tqdm.tqdm(models): # model_id = model.modelId # for model_file in model.siblings: # filename = model_file.rfilename # if filename == "tokenizer.json": # all_models.append((model_id, filename)) all_models = [("HueyNemud/das22-10-camembert_pretrained", "tokenizer.json")] for model_id, filename in tqdm.tqdm(all_models): tokenizer_file = cached_download(hf_hub_url(model_id, filename=filename)) is_ok = check(tokenizer_file) if not is_ok: print(f"{model_id} is affected by no type") invalid_pre_tokenizer.append(model_id) try: Tokenizer.from_file(tokenizer_file) except Exception as e: print(f"{model_id} is not loadable: {e}") not_loadable.append(model_id) except: print(f"{model_id} is not loadable: Rust error") not_loadable.append(model_id) self.assertEqual(invalid_pre_tokenizer, []) self.assertEqual(not_loadable, [])
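For reference, a hedged sketch of the save/load round-trip this suite guards; the file name is arbitrary and the model is a toy one.

import json

from tokenizers import Tokenizer
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE())
tokenizer.save("tokenizer.json")

# The saved file is plain JSON, so the same structural check as `check()` applies to it.
with open("tokenizer.json", "r") as f:
    data = json.load(f)
print(data["model"]["type"])  # "BPE"

# Reading it back is exactly the operation that used to fail for very large files.
reloaded = Tokenizer.from_file("tokenizer.json")
assert reloaded.to_str() == tokenizer.to_str()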
tokenizers/bindings/python/tests/test_serialization.py/0
{ "file_path": "tokenizers/bindings/python/tests/test_serialization.py", "repo_id": "tokenizers", "token_count": 1251 }
218
# Visualizer <tokenizerslangcontent> <python> ## Annotation [[autodoc]] tokenizers.tools.Annotation ## EncodingVisualizer [[autodoc]] tokenizers.tools.EncodingVisualizer - __call__ </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
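A short usage sketch of the classes documented above; it assumes a Jupyter notebook and a pretrained "bert-base-uncased" tokenizer, both of which are illustrative choices.

from tokenizers import Tokenizer
from tokenizers.tools import Annotation, EncodingVisualizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
visualizer = EncodingVisualizer(tokenizer)

text = "The quick brown fox jumps over the lazy dog"
annotations = [Annotation(start=16, end=19, label="animal")]  # character span of "fox"
visualizer(text, annotations=annotations)  # renders an HTML view of the encoding in the notebook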
tokenizers/docs/source-doc-builder/api/visualizer.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/visualizer.mdx", "repo_id": "tokenizers", "token_count": 134 }
219
# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.13.2] - Python only changes ## [0.13.1] - [#1072] Fixing Roberta type ids. ## [0.13.0] - [#1009] `unstable_wasm` feature to support building on Wasm (it's unstable !) - [#1008] `Decoder` is now a composable trait, but without being backward incompatible - [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible Both trait changes warrant a "major" number since, despite best efforts to not break backward compatibility, the code is different enough that we cannot be exactly sure. ## [0.12.1] - [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520 ## [0.12.0] YANKED Bump minor version because of a breaking change. - [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free. - [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience) - [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens) - [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking. - [#961] Added link for Ruby port of `tokenizers` - [#960] Feature gate for `cli` and its `clap` dependency ## [0.11.3] - [#919] Fixing single_word AddedToken. (regression from 0.11.2) - [#916] Deserializing faster `added_tokens` by loading them in batch. ## [0.11.2] - [#884] Fixing bad deserialization following inclusion of a default for Punctuation ## [0.11.1] - [#882] Fixing Punctuation deserialize without argument. - [#868] Fixing missing direction in TruncationParams - [#860] Adding TruncationSide to TruncationParams ## [0.11.0] ### Fixed - [#236]: Fix a bug with offsets being shifted when there are sub-sequences (Usually with special tokens and/or added tokens in the sequence). - [#286]: Fix various crash when training a BPE model - [#309]: Fixed a few bugs related to additional vocabulary/tokens - [#363]: Fix panic from unwrapping `File::open` in `count_words` ### Changed - [#234]: Completely changed the alignement mappings available on `Encoding`. Previous mappings were misleading and only providing offsets. New ones provide methods to easily convert between `char` or `word` (input space) and `token` (output space) - [#236]: `AddedToken` with special options like `rstrip` will keep the matched whitespaces in the textual representation of the token, exposed in `tokens` on the `Encoding`. The ID stays the same as usual. This fixes the offsets for said tokens. - [#236]: Offsets are now converted back to the original referential before we merge the sub-sequences together and then do the post-processing. This also fixes some offsets bugs. - [#236]: ByteLevel PostProcessor now uses the `add_prefix_space` attribute to determine how to trim offsets. - Improved `TruncationError` to handle cases where provided max length is too low. - [#249]: `encode` and `encode_batch` input has been greatly improved, and it now also accept pre-tokenized inputs. - Improved `TruncationError` to handle cases where provided max length is too low. 
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file - [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 - [#309]: Improved the management of the additional vocabulary. This introduces an option `normalized`, controlling whether a token should be extracted from the normalized version of the input text. - [#330]: BertNormalizer now keeps the same behavior than the original implementation when `strip_accents` is not specified. - [#355]: Tokenizer does not use any dynamic dispatch anymore. - [#377]: Use byte offsets everywhere (instead of the char offsets) ### Added - [#236]: RobertaProcessing is now also taking care of trimming offsets, and works just as ByteLevel on this front. - [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...) using serde. It is now easy to save/load an entire tokenizer. - [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure activation of the Tensor Cores, while ensuring padding to a multiple of 8. - [#298]: Ability to get the currently set truncation/padding params - [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment variable. - [#403]: Add `TemplateProcessing` `PostProcessor`. ### How to migrate - Replace any `XXX_to_YYY_offsets()` method call by any of the new ones. - Specify the `add_prefix_space` and `trim_offsets` options on `RobertaProcessing` if you don't want the offsets trimmed out. - Any custom `PostProcessor` now handles offsets relative to the original string (as opposed to the normalized one). ## [0.10.1] ### Fixed - [#226]: Fix the word indexes when there are special tokens ## [0.10.0] ### Changed - [#222]: All Tokenizer's subparts must now be `Send + Sync` ### Added - [#208]: Ability to retrieve the vocabulary from the `Tokenizer` & `Model` ### Fixed - [#205]: Trim the decoded string in `BPEDecoder` - [b770f36]: Fix a bug with added tokens generated IDs ## [0.9.0] ### Changed - Only one progress bar while reading files during training. This is better for use-cases with a high number of files as it avoids having too many progress bars on screen. Also avoids reading the size of each file before starting to actually read these files, as this process could take really long. - [#190]: Improved BPE and WordPiece builders - [#193]: `encode` and `encode_batch` now take a new argument, specifying whether we should add the special tokens - [#197]: The `NormalizedString` has been removed from the `Encoding`. It is now possible to retrieve it by calling `normalize` on the `Tokenizer`. This brings a reduction of 70% of the memory footprint - [#197]: The `NormalizedString` API has been improved. It is now possible to retrieve parts of both strings using both "normalized" or "original" offsets - [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore - `AddedToken` are now used for both `add_special_tokens` and `add_tokens`. Also, these AddedToken have more options to allow various behaviors. ### Added - [#188]: `impl PostProcessor for ByteLevel`: Handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token - More alignment mappings on the `Encoding`. 
- `post_process` can be called on the `Tokenizer` ### Fixed - [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE: - when `add_prefix_space` is activated - [#156]: when a Unicode character gets split-up in multiple byte-level characters - Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded. - [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not advised, but that's not the question) ### How to migrate - Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. ## [0.8.0] ### Changed - [#165]: Big improvements in speed for BPE (Both training and tokenization) ### Fixed - [#163]: Do not open all files directly while training - [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got split up in multiple bytes - [#174]: The `LongestFirst` truncation strategy had a bug [#1072]: https://github.com/huggingface/tokenizers/pull/1072 [#956]: https://github.com/huggingface/tokenizers/pull/956 [#1008]: https://github.com/huggingface/tokenizers/pull/1008 [#1009]: https://github.com/huggingface/tokenizers/pull/1009 [#1047]: https://github.com/huggingface/tokenizers/pull/1047 [#1055]: https://github.com/huggingface/tokenizers/pull/1055 [#1051]: https://github.com/huggingface/tokenizers/pull/1051 [#1052]: https://github.com/huggingface/tokenizers/pull/1052 [#938]: https://github.com/huggingface/tokenizers/pull/938 [#939]: https://github.com/huggingface/tokenizers/pull/939 [#952]: https://github.com/huggingface/tokenizers/pull/952 [#954]: https://github.com/huggingface/tokenizers/pull/954 [#961]: https://github.com/huggingface/tokenizers/pull/961 [#960]: https://github.com/huggingface/tokenizers/pull/960 [#919]: https://github.com/huggingface/tokenizers/pull/919 [#916]: https://github.com/huggingface/tokenizers/pull/916 [#884]: https://github.com/huggingface/tokenizers/pull/884 [#882]: https://github.com/huggingface/tokenizers/pull/882 [#868]: https://github.com/huggingface/tokenizers/pull/868 [#860]: https://github.com/huggingface/tokenizers/pull/860 [#403]: https://github.com/huggingface/tokenizers/pull/403 [#377]: https://github.com/huggingface/tokenizers/pull/377 [#355]: https://github.com/huggingface/tokenizers/pull/355 [#363]: https://github.com/huggingface/tokenizers/pull/363 [#330]: https://github.com/huggingface/tokenizers/pull/330 [#311]: https://github.com/huggingface/tokenizers/pull/311 [#309]: https://github.com/huggingface/tokenizers/pull/309 [#298]: https://github.com/huggingface/tokenizers/pull/298 [#289]: https://github.com/huggingface/tokenizers/pull/289 [#286]: https://github.com/huggingface/tokenizers/pull/286 [#280]: https://github.com/huggingface/tokenizers/pull/280 [#276]: https://github.com/huggingface/tokenizers/pull/276 [#272]: https://github.com/huggingface/tokenizers/pull/272 [#249]: https://github.com/huggingface/tokenizers/pull/249 [b770f36]: https://github.com/huggingface/tokenizers/commit/b770f364280af33efeffea8f0003102cda8cf1b7 [#236]: https://github.com/huggingface/tokenizers/pull/236 [#234]: https://github.com/huggingface/tokenizers/pull/234 [#226]: https://github.com/huggingface/tokenizers/pull/226 [#222]: https://github.com/huggingface/tokenizers/pull/222 [#208]: https://github.com/huggingface/tokenizers/pull/208 [#205]: https://github.com/huggingface/tokenizers/issues/205 [#197]: https://github.com/huggingface/tokenizers/pull/197 [#193]: https://github.com/huggingface/tokenizers/pull/193 
[#190]: https://github.com/huggingface/tokenizers/pull/190 [#188]: https://github.com/huggingface/tokenizers/pull/188 [#175]: https://github.com/huggingface/tokenizers/issues/175 [#174]: https://github.com/huggingface/tokenizers/issues/174 [#165]: https://github.com/huggingface/tokenizers/pull/165 [#163]: https://github.com/huggingface/tokenizers/issues/163 [#156]: https://github.com/huggingface/tokenizers/pull/156
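As a companion to the migration notes above, here is a hedged sketch of the `TemplateProcessing` post-processor added in [#403], shown through the Python bindings for brevity; the BERT-style template and the token ids 101/102 are only an example.

from tokenizers.processors import TemplateProcessing

post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
)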
tokenizers/tokenizers/CHANGELOG.md/0
{ "file_path": "tokenizers/tokenizers/CHANGELOG.md", "repo_id": "tokenizers", "token_count": 3388 }
220
pub fn set_panic_hook() { // When the `console_error_panic_hook` feature is enabled, we can call the // `set_panic_hook` function at least once during initialization, and then // we will get better error messages if our code ever panics. // // For more details see // https://github.com/rustwasm/console_error_panic_hook#readme #[cfg(feature = "console_error_panic_hook")] console_error_panic_hook::set_once(); }
tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs", "repo_id": "tokenizers", "token_count": 150 }
221
use crate::tokenizer::{Decoder, Result};

use serde::{Deserialize, Serialize};

#[derive(Deserialize, Clone, Debug, Serialize)]
/// Allows decoding Original BPE by joining all the tokens and then replacing
/// the suffix used to identify end-of-words by whitespaces
#[serde(tag = "type")]
#[non_exhaustive]
pub struct BPEDecoder {
    pub suffix: String,
}

impl BPEDecoder {
    pub fn new(suffix: String) -> Self {
        Self { suffix }
    }
}

impl Default for BPEDecoder {
    fn default() -> Self {
        Self::new("</w>".into())
    }
}

impl Decoder for BPEDecoder {
    fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
        // Index of the last token: every token but the last one gets its end-of-word
        // suffix replaced by a whitespace. `saturating_sub` avoids an underflow panic
        // when the token list is empty.
        let n = tokens.len().saturating_sub(1);
        Ok(tokens
            .into_iter()
            .enumerate()
            .map(|(i, token)| {
                let replacement = if i == n { "" } else { " " };
                token.replace(&self.suffix, replacement)
            })
            .collect())
    }
}
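A quick sketch of the behaviour implemented above, driven through the Python bindings; the token strings are made up.

from tokenizers.decoders import BPEDecoder

decoder = BPEDecoder(suffix="</w>")
print(decoder.decode(["My</w>", "na", "me</w>", "is</w>", "John</w>"]))
# -> "My name is John"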
tokenizers/tokenizers/src/decoders/bpe.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/bpe.rs", "repo_id": "tokenizers", "token_count": 419 }
222
//! [Unigram](https://arxiv.org/abs/1804.10959) model. mod lattice; mod model; mod serialization; mod trainer; mod trie; pub use lattice::*; pub use model::*; pub use trainer::*;
tokenizers/tokenizers/src/models/unigram/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/mod.rs", "repo_id": "tokenizers", "token_count": 72 }
223
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; use unicode_normalization_alignments::char::is_combining_mark; #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[non_exhaustive] pub struct Strip { pub strip_left: bool, pub strip_right: bool, } impl Strip { pub fn new(strip_left: bool, strip_right: bool) -> Self { Self { strip_left, strip_right, } } } impl Normalizer for Strip { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { if self.strip_left && self.strip_right { // Fast path normalized.strip(); } else { if self.strip_left { normalized.lstrip(); } if self.strip_right { normalized.rstrip(); } } Ok(()) } } // This normalizer removes combining marks from a normalized string // It's different from unidecode as it does not attempt to modify // non ascii languages. #[derive(Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct StripAccents; impl Normalizer for StripAccents { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.filter(|c| !is_combining_mark(c)); Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::normalizer::NormalizedString; use crate::normalizers::Lowercase; use crate::normalizers::NFKD; use unicode_normalization_alignments::UnicodeNormalization; #[test] fn test_strip_accents() { // Unicode combining char let original: String = "Me llamó".nfkd().map(|(c, _)| c).collect(); let normalized = "Me llamo"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); // Ignores regular ascii let original = "Me llamo"; let normalized = "Me llamo"; assert_eq!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); // Does not change chinese let original: String = "这很简单".nfkd().map(|(c, _)| c).collect(); let normalized = "这很简单"; assert_eq!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_vietnamese_bug() { let original: String = "ậ…".to_string(); let normalized = "a...".to_string(); assert_ne!(original, normalized); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); let original: String = "Cụ thể, bạn sẽ tham gia một nhóm các giám đốc điều hành tổ chức, các nhà lãnh đạo doanh nghiệp, các học giả, chuyên gia phát triển và tình nguyện viên riêng biệt trong lĩnh vực phi lợi nhuận…".to_string(); let normalized = "cu the, ban se tham gia mot nhom cac giam đoc đieu hanh to chuc, cac nha lanh đao doanh nghiep, cac hoc gia, chuyen gia phat trien va tinh nguyen vien rieng biet trong linh vuc phi loi nhuan...".to_string(); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_thai_bug() { let original = "ำน\u{e49}ำ3ลำ".to_string(); let normalized = "านา3ลา".to_string(); assert_ne!(original, normalized); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); 
StripAccents.normalize(&mut n).unwrap(); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_strip_accents_multiple() { let original = "e\u{304}\u{304}\u{304}o"; let normalized = "eo"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); assert_eq!( n, NormalizedString::new( original.to_string(), normalized.to_string(), vec![(0, 1), (7, 8)], 0 ) ); assert_eq!( n.alignments_original(), vec![ (0, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 2) ] ); } }
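The same pipeline the tests above exercise can be reproduced from the Python bindings; a minimal sketch:

from tokenizers.normalizers import NFKD, Lowercase, Sequence, Strip, StripAccents

normalizer = Sequence([NFKD(), StripAccents(), Lowercase()])
print(normalizer.normalize_str("Me llamó"))  # -> "me llamo"

print(Strip().normalize_str("  hello  "))  # -> "hello"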
tokenizers/tokenizers/src/normalizers/strip.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/strip.rs", "repo_id": "tokenizers", "token_count": 2512 }
224
use crate::tokenizer::{Encoding, PostProcessor, Result}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::iter::FromIterator; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(tag = "type")] pub struct BertProcessing { sep: (String, u32), cls: (String, u32), } impl Default for BertProcessing { fn default() -> Self { Self { sep: ("[SEP]".into(), 102), cls: ("[CLS]".into(), 101), } } } impl BertProcessing { pub fn new(sep: (String, u32), cls: (String, u32)) -> Self { Self { sep, cls } } } #[derive(thiserror::Error, Debug)] pub enum BertProcessorError { #[error("encodings vector length must be either 1 or 2")] InvalidEncodingsVecLength, } impl PostProcessor for BertProcessing { fn added_tokens(&self, is_pair: bool) -> usize { if is_pair { 3 } else { 2 } } fn process_encodings( &self, mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { if !add_special_tokens { return Ok(encodings); } let encodings: Vec<Encoding> = encodings .iter_mut() .enumerate() .map(|(i, encoding)| { if i == 0 { let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat(); let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat(); let tokens = [ &[self.cls.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let words = [&[None], encoding.get_word_ids(), &[None]].concat(); let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let special_tokens = [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]].concat(); let attention_mask = vec![1; ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain // the special tokens. let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]); Encoding::new( ids, type_ids, tokens, words, offsets, special_tokens, attention_mask, encoding .take_overflowing() .into_iter() .map(|encoding| { let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat(); let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat(); let tokens = [ &[self.cls.0.clone()], encoding.get_tokens(), &[self.sep.0.clone()], ] .concat(); let words = [&[None], encoding.get_word_ids(), &[None]].concat(); let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat(); let special_tokens = [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]] .concat(); let attention_mask = vec![1; ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't // contain the special tokens. let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]); Encoding::new( ids, type_ids, tokens, words, offsets, special_tokens, attention_mask, vec![], sequence_ranges, ) }) .collect(), sequence_ranges, ) } else { let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat(); let pair_type_ids = [encoding.get_type_ids(), &[1]].concat(); let pair_tokens = [encoding.get_tokens(), &[self.sep.0.clone()]].concat(); let pair_words = [encoding.get_word_ids(), &[None]].concat(); let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat(); let pair_special_tokens = [&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat(); let pair_attention_mask = vec![1; pair_ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain // the special tokens. 
let pair_sequence_ranges = HashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]); Encoding::new( pair_ids, pair_type_ids, pair_tokens, pair_words, pair_offsets, pair_special_tokens, pair_attention_mask, encoding .take_overflowing() .into_iter() .map(|encoding| { let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat(); let pair_type_ids = [encoding.get_type_ids(), &[1]].concat(); let pair_tokens = [encoding.get_tokens(), &[self.sep.0.clone()]].concat(); let pair_words = [encoding.get_word_ids(), &[None]].concat(); let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat(); let pair_special_tokens = [&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat(); let pair_attention_mask = vec![1; pair_ids.len()]; // For compatibility with `TemplateProcessing`, the sequence_ranges // shouldn't contain the special tokens. let pair_sequence_ranges = HashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]); Encoding::new( pair_ids, pair_type_ids, pair_tokens, pair_words, pair_offsets, pair_special_tokens, pair_attention_mask, vec![], pair_sequence_ranges, ) }) .collect(), pair_sequence_ranges, ) } }) .collect(); Ok(encodings) } } #[cfg(test)] mod tests { use super::*; #[test] fn serde() { let bert = BertProcessing::default(); let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#; assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r); assert_eq!( serde_json::from_str::<BertProcessing>(bert_r).unwrap(), bert ); } #[test] fn bert_processing() { let processor = BertProcessing::default(); assert_eq!(processor.added_tokens(false), 2); assert_eq!(processor.added_tokens(true), 3); use crate::Token; let encoding = Encoding::from_tokens( vec![ Token::new(12, "Hello".into(), (0, 5)), Token::new(14, "there".into(), (6, 11)), ], 0, ); let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0); let single_encoding = processor.process(encoding.clone(), None, true).unwrap(); assert_eq!( single_encoding, Encoding::new( vec![101, 12, 14, 102], vec![0, 0, 0, 0], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into() ], vec![None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0)], vec![1, 0, 0, 1], vec![1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3)]), ) ); assert_eq!(single_encoding.token_to_sequence(2), Some(0)); assert_eq!(single_encoding.token_to_sequence(3), None); let pair_encoding = processor .process(encoding.clone(), Some(pair.clone()), true) .unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![101, 12, 14, 102, 15, 102], vec![0, 0, 0, 0, 1, 1], vec![ "[CLS]".into(), "Hello".into(), "there".into(), "[SEP]".into(), "pair".into(), "[SEP]".into() ], vec![None, None, None, None, None, None], vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (0, 0)], vec![1, 0, 0, 1, 0, 1], vec![1, 1, 1, 1, 1, 1], vec![], HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]), ) ); assert_eq!(pair_encoding.token_to_sequence(2), Some(0)); assert_eq!(pair_encoding.token_to_sequence(3), None); assert_eq!(pair_encoding.token_to_sequence(4), Some(1)); assert_eq!(pair_encoding.token_to_sequence(5), None); // No special tokens let pair_encoding = processor.process(encoding, Some(pair), false).unwrap(); assert_eq!( pair_encoding, Encoding::new( vec![12, 14, 15], vec![0, 0, 1], vec!["Hello".into(), "there".into(), "pair".into(),], vec![None, None, None], vec![(0, 5), (6, 11), (0, 4)], vec![0, 0, 0], vec![1, 1, 1], vec![], HashMap::from_iter(vec![(0, 0..2), (1, 2..3)]), ) ); assert_eq!(pair_encoding.token_to_sequence(0), Some(0)); 
assert_eq!(pair_encoding.token_to_sequence(1), Some(0)); assert_eq!(pair_encoding.token_to_sequence(2), Some(1)); } }
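A hedged sketch of how this post-processor is attached from Python; the ids mirror the defaults above and the tiny word-level vocab is made up.

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import BertProcessing

vocab = {"[UNK]": 0, "hello": 12, "there": 14, "[CLS]": 101, "[SEP]": 102}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.post_processor = BertProcessing(sep=("[SEP]", 102), cls=("[CLS]", 101))

encoding = tokenizer.encode("hello there")
print(encoding.tokens)    # ['[CLS]', 'hello', 'there', '[SEP]']
print(encoding.type_ids)  # [0, 0, 0, 0]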
tokenizers/tokenizers/src/processors/bert.rs/0
{ "file_path": "tokenizers/tokenizers/src/processors/bert.rs", "repo_id": "tokenizers", "token_count": 7375 }
225
pub(crate) mod cache; #[cfg(feature = "http")] pub(crate) mod from_pretrained; #[cfg(feature = "unstable_wasm")] mod fancy; #[cfg(feature = "unstable_wasm")] pub use fancy::SysRegex; #[cfg(not(feature = "unstable_wasm"))] mod onig; #[cfg(not(feature = "unstable_wasm"))] pub use crate::utils::onig::SysRegex; pub mod iter; pub mod padding; pub mod parallelism; pub(crate) mod progress; pub mod truncation; use serde::{Serialize, Serializer}; use std::collections::{BTreeMap, HashMap}; pub(crate) fn ordered_map<S, K, V>( value: &HashMap<K, V>, serializer: S, ) -> std::result::Result<S::Ok, S::Error> where S: Serializer, K: Serialize + std::cmp::Ord, V: Serialize, { let ordered: BTreeMap<_, _> = value.iter().collect(); ordered.serialize(serializer) } macro_rules! impl_enum_from ( ($from_ty:ty, $enum:ty, $variant:ident) => { impl From<$from_ty> for $enum { fn from(from: $from_ty) -> Self { <$enum>::$variant(from) } } } ); /// Implement `serde::{Serialize, Serializer}` with `#[serde(tag = "type")]` attribute for a given struct. /// Panic when a json string being deserilized misses field `type`. /// /// # Examples /// /// ``` /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// #[derive(Debug)] /// struct Point { /// x: i32, /// #[serde(default = "default_y")] /// y: i32, /// } /// } /// fn default_y() -> i32 { /// 5 /// } /// /// let point = Point { x: 1, y: 2 }; /// let serialized_s = r#"{"type":"Point","x":1,"y":2}"#; /// assert_eq!(serde_json::to_string(&point).unwrap(), serialized_s); /// } /// ``` /// /// ```should_panic /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// #[derive(Debug)] /// struct Point1D { /// x: i32, /// } /// } /// /// let serialized_s = r#"{"x":1}"#; /// let deserialized: Point1D = serde_json::from_str(serialized_s).unwrap(); /// } /// ``` /// /// # Examples (unit structs) /// /// ``` /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// struct Unit; /// } /// /// let unit = Unit; /// let serialized_s = r#"{"type":"Unit"}"#; /// assert_eq!(serde_json::to_string(&unit).unwrap(), serialized_s); /// } /// ``` /// /// ```should_panic /// # #[macro_use] extern crate tokenizers; /// use serde::{Serialize, Deserialize}; /// /// fn main() { /// impl_serde_type!{ /// struct Unit; /// } /// /// let serialized_s = r#"{"some_field":1}"#; /// let deserialized: Unit = serde_json::from_str(serialized_s).unwrap(); /// } /// ``` #[macro_export] macro_rules! 
impl_serde_type{ ( $(#[$meta:meta])* $vis:vis struct $struct_name:ident { $( $(#[$field_meta:meta])* $field_vis:vis $field_name:ident : $field_type:ty ),*$(,)+ } ) => { paste::paste!{ $(#[$meta])* #[derive(Serialize, Deserialize)] #[serde(tag = "type", from = $struct_name "Deserializer")] $vis struct $struct_name{ $( $(#[$field_meta])* $field_vis $field_name : $field_type, )* } #[doc(hidden)] $(#[$meta])* #[derive(Deserialize)] #[serde(tag = "type", remote = $struct_name "")] struct [<$struct_name Def>]{ $( $(#[$field_meta])* $field_vis $field_name : $field_type, )* } #[doc(hidden)] #[derive(Deserialize)] enum [<$struct_name Type>] { $struct_name, } #[doc(hidden)] #[derive(Deserialize)] struct [<$struct_name Deserializer>] { #[allow(dead_code)] r#type: [<$struct_name Type>], #[serde(flatten, with = $struct_name "Def")] r#struct: $struct_name, } #[doc(hidden)] impl std::convert::From<[<$struct_name Deserializer>]> for $struct_name { fn from(v: [<$struct_name Deserializer>]) -> Self { v.r#struct } } } }; ( $(#[$meta:meta])* $vis:vis struct $struct_name:ident; ) => { paste::paste!{ $(#[$meta])* $vis struct $struct_name; impl serde::Serialize for $struct_name { fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: serde::ser::Serializer { let helper = [<$struct_name Helper>]{r#type: [<$struct_name Type>]::$struct_name}; helper.serialize(serializer) } } impl<'de> serde::Deserialize<'de> for $struct_name { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: serde::Deserializer<'de>, { let _helper = [<$struct_name Helper>]::deserialize(deserializer)?; Ok($struct_name) } } #[derive(serde::Serialize, serde::Deserialize)] enum [<$struct_name Type>] { $struct_name, } #[derive(serde::Serialize, serde::Deserialize)] struct [<$struct_name Helper>] { #[allow(dead_code)] r#type: [<$struct_name Type>], } } } } // Re-export macro_rules_attribute pub use macro_rules_attribute::macro_rules_attribute;
tokenizers/tokenizers/src/utils/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/mod.rs", "repo_id": "tokenizers", "token_count": 3092 }
226