Dataset schema: text (string, 7 to 318k chars), id (string, 14 to 166 chars), metadata (dict), __index_level_0__ (int64, 0 to 439)
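The rows that follow conform to this four-column schema. As a purely hypothetical sketch (the dataset identifier `user/code-corpus` is a placeholder, since the real identifier is not given here), rows of this shape are typically accessed with the `datasets` library like this:

```python
# Hypothetical access pattern for rows with this schema; the dataset id is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-corpus", split="train")

row = ds[316]
print(row["id"])                       # e.g. "transformers/src/.../processing_bridgetower.py/0"
print(row["metadata"]["token_count"])  # per-row token count stored in the metadata dict
print(row["text"][:80])                # raw source-file contents
```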
# coding=utf-8 # Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for BridgeTower. """ from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class BridgeTowerProcessor(ProcessorMixin): r""" Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single processor. [`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and [`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and [`~BridgeTowerProcessor.decode`] for more information. Args: image_processor (`BridgeTowerImageProcessor`): An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input. tokenizer (`RobertaTokenizerFast`): An instance of ['RobertaTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "BridgeTowerImageProcessor" tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__( self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method uses [`BridgeTowerImageProcessor.__call__`] method to prepare image(s) for the model, and [`RobertaTokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information. 
""" encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) # add pixel_values + pixel_mask encoding_image_processor = self.image_processor( images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs ) encoding.update(encoding_image_processor) return encoding def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
transformers/src/transformers/models/bridgetower/processing_bridgetower.py/0
{ "file_path": "transformers/src/transformers/models/bridgetower/processing_bridgetower.py", "repo_id": "transformers", "token_count": 1929 }
316
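The row above holds the `BridgeTowerProcessor` implementation, which pairs a RoBERTa tokenizer with the BridgeTower image processor. A minimal usage sketch follows; the Hub checkpoint name is an assumption, not part of the row:

```python
# Minimal sketch of BridgeTowerProcessor usage (checkpoint name assumed).
import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# The processor tokenizes the text and adds pixel_values / pixel_mask for the image.
inputs = processor(images=image, text="two cats sleeping on a couch", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_mask', 'pixel_values']
```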
# coding=utf-8 # Copyright Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ CANINE model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json", # See all CANINE models at https://huggingface.co/models?filter=canine } class CanineConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CanineModel`]. It is used to instantiate an CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CANINE [google/canine-s](https://huggingface.co/google/canine-s) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the deep Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoders. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoders. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoders, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 16384): The maximum sequence length that this model might ever be used with. type_vocab_size (`int`, *optional*, defaults to 16): The vocabulary size of the `token_type_ids` passed when calling [`CanineModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 57344): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 57345): End of stream token id. downsampling_rate (`int`, *optional*, defaults to 4): The rate at which to downsample the original character sequence length before applying the deep Transformer encoder. 
upsampling_kernel_size (`int`, *optional*, defaults to 4): The kernel size (i.e. the number of characters in each window) of the convolutional projection layer when projecting back from `hidden_size`*2 to `hidden_size`. num_hash_functions (`int`, *optional*, defaults to 8): The number of hash functions to use. Each hash function has its own embedding matrix. num_hash_buckets (`int`, *optional*, defaults to 16384): The number of hash buckets to use. local_transformer_stride (`int`, *optional*, defaults to 128): The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good TPU/XLA memory alignment. Example: ```python >>> from transformers import CanineConfig, CanineModel >>> # Initializing a CANINE google/canine-s style configuration >>> configuration = CanineConfig() >>> # Initializing a model (with random weights) from the google/canine-s style configuration >>> model = CanineModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "canine" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, # Good TPU/XLA memory alignment. **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps # Character config: self.downsampling_rate = downsampling_rate self.upsampling_kernel_size = upsampling_kernel_size self.num_hash_functions = num_hash_functions self.num_hash_buckets = num_hash_buckets self.local_transformer_stride = local_transformer_stride
transformers/src/transformers/models/canine/configuration_canine.py/0
{ "file_path": "transformers/src/transformers/models/canine/configuration_canine.py", "repo_id": "transformers", "token_count": 2532 }
317
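The row above is the CANINE configuration class. A short sketch of instantiating it with non-default values and inspecting the character-level settings it documents (downsampling rate, hash embeddings, private-use BOS/EOS code points):

```python
# Sketch: building a CANINE configuration and reading its character-level hyperparameters.
from transformers import CanineConfig, CanineModel

config = CanineConfig(
    num_hidden_layers=6,   # shallower deep encoder than the default 12
    downsampling_rate=4,   # characters are downsampled 4x before the deep encoder
    num_hash_functions=8,
    num_hash_buckets=16384,
)
model = CanineModel(config)  # randomly initialized

print(config.bos_token_id, config.eos_token_id)  # 57344, 57345 (0xE000 / 0xE001)
print(config.max_position_embeddings)            # 16384 characters
```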
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audio/Text processor class for CLAP """ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class ClapProcessor(ProcessorMixin): r""" Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBerta tokenizer into a single processor. [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information. Args: feature_extractor ([`ClapFeatureExtractor`]): The audio processor is a required input. tokenizer ([`RobertaTokenizerFast`]): The tokenizer is a required input. """ feature_extractor_class = "ClapFeatureExtractor" tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text` and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwrags` arguments to ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`. 
""" sampling_rate = kwargs.pop("sampling_rate", None) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if audios is not None: audio_features = self.feature_extractor( audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs ) if text is not None and audios is not None: encoding["input_features"] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
transformers/src/transformers/models/clap/processing_clap.py/0
{ "file_path": "transformers/src/transformers/models/clap/processing_clap.py", "repo_id": "transformers", "token_count": 2177 }
318
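The row above is the CLAP processor, which routes text to the RoBERTa tokenizer and audio to the CLAP feature extractor. A hedged usage sketch; the checkpoint name and the 48 kHz sampling rate are assumptions, and the silent waveform stands in for real audio:

```python
# Sketch of ClapProcessor usage (checkpoint name and sampling rate assumed).
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz

inputs = processor(
    text=["the sound of a dog barking"],
    audios=audio,
    sampling_rate=48_000,
    return_tensors="pt",
)
# Per __call__ above: text yields input_ids/attention_mask, audio adds input_features.
print(sorted(inputs.keys()))
```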
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for CLIPSeg """ import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class CLIPSegProcessor(ProcessorMixin): r""" Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor. [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information. Args: image_processor ([`ViTImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "ViTImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. 
visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images.") if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if visual_prompt is not None: prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if visual_prompt is not None and images is not None: encoding = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: encoding = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
transformers/src/transformers/models/clipseg/processing_clipseg.py/0
{ "file_path": "transformers/src/transformers/models/clipseg/processing_clipseg.py", "repo_id": "transformers", "token_count": 3092 }
319
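The row above is the CLIPSeg processor, whose `__call__` accepts either text prompts or a visual prompt (not both) alongside the images to segment. A small sketch of both paths; the Hub checkpoint name is an assumption:

```python
# Sketch of CLIPSegProcessor usage with text prompts and with a visual prompt (checkpoint name assumed).
import requests
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text prompts plus images -> input_ids, attention_mask and pixel_values.
inputs = processor(text=["a cat", "a remote control"], images=[image, image], return_tensors="pt")
print(sorted(inputs.keys()))

# A visual prompt instead of text -> pixel_values plus conditional_pixel_values.
prompt_inputs = processor(images=image, visual_prompt=image, return_tensors="pt")
print(sorted(prompt_inputs.keys()))
```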
# coding=utf-8 # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "Salesforce/codegen-350M-mono": 2048, } class CodeGenTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import CodeGenTokenizerFast >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono") >>> tokenizer("Hello world")["input_ids"] [15496, 995] >>> tokenizer(" Hello world")["input_ids"] [18435, 995] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. merges_file (`str`, *optional*): Path to the merges file. tokenizer_file (`str`, *optional*): Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (CodeGen tokenizer detect beginning of words by the preceding space). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = CodeGenTokenizer def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ): super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, ) if kwargs.pop("add_bos_token", False): model_id = kwargs.pop("name_or_path", "") raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. " "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def decode( self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. 
skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). truncate_before_pattern (`List[str]`, *optional*, defaults to `None`): A list of regular expression strings that will be used to truncate the returned string. This can be used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. """ decoded_text = super().decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) if truncate_before_pattern is not None and len(truncate_before_pattern) > 0: decoded_text = self.truncate(decoded_text, truncate_before_pattern) return decoded_text def truncate(self, completion, truncate_before_pattern): def find_re(string, pattern, start_pos): m = pattern.search(string, start_pos) return m.start() if m else -1 terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern] prints = list(re.finditer("^print", completion, re.MULTILINE)) if len(prints) > 1: completion = completion[: prints[1].start()] defs = list(re.finditer("^def", completion, re.MULTILINE)) if len(defs) > 1: completion = completion[: defs[1].start()] start_pos = 0 terminals_pos = [ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1 ] if len(terminals_pos) > 0: return completion[: min(terminals_pos)] else: return completion
transformers/src/transformers/models/codegen/tokenization_codegen_fast.py/0
{ "file_path": "transformers/src/transformers/models/codegen/tokenization_codegen_fast.py", "repo_id": "transformers", "token_count": 4249 }
320
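The row above is the fast CodeGen tokenizer, whose `decode` accepts a `truncate_before_pattern` argument for cutting off trailing completions. A short sketch using the checkpoint named in the file itself:

```python
# Sketch: decode with truncate_before_pattern, as implemented in CodeGenTokenizerFast.decode above.
import re
from transformers import CodeGenTokenizerFast

tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")

ids = tokenizer("def hello():\n    print('hi')\n\n\n# trailing comment")["input_ids"]

# Everything from the earliest pattern match onward is dropped from the decoded string.
text = tokenizer.decode(
    ids,
    truncate_before_pattern=["^#", re.escape("<|endoftext|>"), "\n\n\n"],
)
print(text)  # -> "def hello():\n    print('hi')"
```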
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ConvNext checkpoints from the original repository. URL: https://github.com/facebookresearch/ConvNeXt""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_convnext_config(checkpoint_url): config = ConvNextConfig() if "tiny" in checkpoint_url: depths = [3, 3, 9, 3] hidden_sizes = [96, 192, 384, 768] if "small" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [96, 192, 384, 768] if "base" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [128, 256, 512, 1024] if "large" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [192, 384, 768, 1536] if "xlarge" in checkpoint_url: depths = [3, 3, 27, 3] hidden_sizes = [256, 512, 1024, 2048] if "1k" in checkpoint_url: num_labels = 1000 filename = "imagenet-1k-id2label.json" expected_shape = (1, 1000) else: num_labels = 21841 filename = "imagenet-22k-id2label.json" expected_shape = (1, 21841) repo_id = "huggingface/label-files" config.num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} if "1k" not in checkpoint_url: # this dataset contains 21843 labels but the model only has 21841 # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18 del id2label[9205] del id2label[15027] config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} config.hidden_sizes = hidden_sizes config.depths = depths return config, expected_shape def rename_key(name): if "downsample_layers.0.0" in name: name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings") if "downsample_layers.0.1" in name: name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on if "downsample_layers.1.0" in name: name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0") if "downsample_layers.1.1" in name: name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1") if "downsample_layers.2.0" in name: name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0") if "downsample_layers.2.1" in name: name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1") if "downsample_layers.3.0" in name: name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0") if "downsample_layers.3.1" in name: name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1") if "stages" in name and "downsampling_layer" not in name: # stages.0.0. for instance should be renamed to stages.0.layers.0. 
name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :] if "stages" in name: name = name.replace("stages", "encoder.stages") if "norm" in name: name = name.replace("norm", "layernorm") if "gamma" in name: name = name.replace("gamma", "layer_scale_parameter") if "head" in name: name = name.replace("head", "classifier") return name # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our ConvNext structure. """ # define ConvNext configuration based on URL config, expected_shape = get_convnext_config(checkpoint_url) # load original state_dict from URL state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"] # rename keys for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val # add prefix to all keys expect classifier head for key in state_dict.copy().keys(): val = state_dict.pop(key) if not key.startswith("classifier"): key = "convnext." + key state_dict[key] = val # load HuggingFace model model = ConvNextForImageClassification(config) model.load_state_dict(state_dict) model.eval() # Check outputs on an image, prepared by ConvNextImageProcessor size = 224 if "224" in checkpoint_url else 384 image_processor = ConvNextImageProcessor(size=size) pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values logits = model(pixel_values).logits # note: the logits below were obtained without center cropping if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth": expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth": expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth": expected_logits = torch.tensor([0.4525, 0.7539, 0.0308]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth": expected_logits = torch.tensor([0.3561, 0.6350, -0.0384]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth": expected_logits = torch.tensor([0.4174, -0.0989, 0.1489]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth": expected_logits = torch.tensor([0.2513, -0.1349, -0.1613]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth": expected_logits = torch.tensor([1.2980, 0.3631, -0.1198]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth": expected_logits = torch.tensor([1.2963, 0.1227, 0.1723]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth": expected_logits = torch.tensor([1.7956, 0.8390, 0.2820]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth": expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth": expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth": expected_logits = torch.tensor([0.2681, 0.2365, 0.6246]) elif checkpoint_url == 
"https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth": expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth": expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379]) elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth": expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444]) else: raise ValueError(f"Unknown URL: {checkpoint_url}") assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3) assert logits.shape == expected_shape Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) print("Pushing model to the hub...") model_name = "convnext" if "tiny" in checkpoint_url: model_name += "-tiny" elif "small" in checkpoint_url: model_name += "-small" elif "base" in checkpoint_url: model_name += "-base" elif "xlarge" in checkpoint_url: model_name += "-xlarge" elif "large" in checkpoint_url: model_name += "-large" if "224" in checkpoint_url: model_name += "-224" elif "384" in checkpoint_url: model_name += "-384" if "22k" in checkpoint_url and "1k" not in checkpoint_url: model_name += "-22k" if "22k" in checkpoint_url and "1k" in checkpoint_url: model_name += "-22k-1k" model.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", type=str, help="URL of the original ConvNeXT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory.", ) args = parser.parse_args() convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
transformers/src/transformers/models/convnext/convert_convnext_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/convnext/convert_convnext_to_pytorch.py", "repo_id": "transformers", "token_count": 4224 }
321
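The row above is the ConvNeXT checkpoint-conversion script, which sanity-checks logits on a COCO image before saving. A sketch of exercising a converted checkpoint the same way; the Hub id `facebook/convnext-tiny-224` is an assumption about where converted weights are published, and any local dump folder produced by the script would work in its place:

```python
# Sketch: running a converted ConvNeXT classifier on the script's COCO test image (Hub id assumed).
import requests
import torch
from PIL import Image
from transformers import ConvNextForImageClassification, ConvNextImageProcessor

image_processor = ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

with torch.no_grad():
    logits = model(**image_processor(images=image, return_tensors="pt")).logits

print(model.config.id2label[int(logits.argmax(-1))])  # ImageNet label for the predicted class
```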
# coding=utf-8 # Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for CPMAnt.""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "openbmb/cpm-ant-10b": 1024, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab class WordpieceTokenizer(object): def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, token): chars = list(token) if len(chars) > self.max_input_chars_per_word: return [self.unk_token] start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(cur_substr) start = end return sub_tokens class CpmAntTokenizer(PreTrainedTokenizer): """ Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding. Args: vocab_file (`str`): Path to the vocabulary file. bod_token (`str`, *optional*, defaults to `"<d>"`): The beginning of document token. eod_token (`str`, *optional*, defaults to `"</d>"`): The end of document token. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. line_token (`str`, *optional*, defaults to `"</n>"`): The line token. space_token (`str`, *optional*, defaults to `"</_>"`): The space token. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] add_prefix_space = False def __init__( self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ): requires_backends(self, ["jieba"]) self.bod_token = bod_token self.eod_token = eod_token self.encoder = load_vocab(vocab_file) self.encoder[" "] = self.encoder[space_token] self.encoder["\n"] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1])) self.decoder = {v: k for k, v in self.encoder.items()} self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token) super().__init__( bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, ) @property def bod_token_id(self): return self.encoder[self.bod_token] @property def eod_token_id(self): return self.encoder[self.eod_token] @property def newline_id(self): return self.encoder["\n"] @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def _tokenize(self, text): """Tokenize a string.""" output_tokens = [] for x in jieba.cut(text, cut_all=False): output_tokens.extend(self.wordpiece_tokenizer.tokenize(x)) return output_tokens def _decode(self, token_ids, **kwargs): """Decode ids into a string.""" token_ids = [i for i in token_ids if i >= 0] token_ids = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(token_ids, **kwargs) def check(self, token): return token in self.encoder def convert_tokens_to_string(self, tokens: List[str]) -> str: return "".join(tokens) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory index = 0 if " " in self.encoder: self.encoder["</_>"] = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: self.encoder["</n>"] = self.encoder["\n"] del self.encoder["\n"] self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1])) with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CPMAnt sequence has the following format: - single sequence: `[BOS] Sequence`. Args: token_ids_0 (`List[int]`): The first tokenized sequence that special tokens will be added. token_ids_1 (`List[int]`): The optional second tokenized sequence that special tokens will be added. Returns: `List[int]`: The model input with special tokens. """ if token_ids_1 is None: return [self.bos_token_id] + token_ids_0 return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1 def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) return [1] + ([0] * len(token_ids_0))
transformers/src/transformers/models/cpmant/tokenization_cpmant.py/0
{ "file_path": "transformers/src/transformers/models/cpmant/tokenization_cpmant.py", "repo_id": "transformers", "token_count": 4580 }
322
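The row above is the CPMAnt tokenizer, which segments text with jieba, applies WordPiece, and prepends a BOS token to single sequences. A usage sketch; it requires the `jieba` backend, and the checkpoint name is taken from the vocab map in the file:

```python
# Sketch of CpmAntTokenizer usage (requires jieba; checkpoint name from the file's vocab map).
from transformers import CpmAntTokenizer

tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")

ids = tokenizer("今天天气真好")["input_ids"]

# Single sequences are encoded as [BOS] + tokens, per build_inputs_with_special_tokens above.
print(ids[0] == tokenizer.bos_token_id)  # True
print(tokenizer.decode(ids))             # _decode strips pad/bos/eos before joining tokens
```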
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert data2vec checkpoint.""" import argparse import os import pathlib import fairseq import torch from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import ( Data2VecTextConfig, Data2VecTextForMaskedLM, Data2VecTextForSequenceClassification, Data2VecTextModel, ) from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) # IMPORTANT: In order for this script to run, please make sure to download the dictionary: `dict.txt` from wget https://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz # File copied from https://github.com/pytorch/fairseq/blob/main/examples/data2vec/models/data2vec_text.py from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_TEXT = "Hello world! cécé herlolip" def convert_data2vec_checkpoint_to_pytorch( data2vec_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool ): """ Copy/paste/tweak data2vec's weights to our BERT structure. """ data2vec_checkpoint_dir, data2vec_checkpoint_file_name = os.path.split(data2vec_checkpoint_path) data2vec = Data2VecTextModel.from_pretrained( data2vec_checkpoint_dir, checkpoint_file=data2vec_checkpoint_file_name ) data2vec.eval() # disable dropout data2vec_model = data2vec.models[0] data2vec_sent_encoder = data2vec_model.encoder.sentence_encoder config = Data2VecTextConfig( vocab_size=data2vec_sent_encoder.embed_tokens.num_embeddings, hidden_size=data2vec_model.args.encoder_embed_dim, num_hidden_layers=data2vec_model.args.encoder_layers, num_attention_heads=data2vec_model.args.encoder_attention_heads, intermediate_size=data2vec_model.args.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, # PyTorch default used in fairseq ) if classification_head: config.num_labels = data2vec.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our BERT config:", config) model = Data2VecTextForSequenceClassification(config) if classification_head else Data2VecTextForMaskedLM(config) model.eval() # Now let's copy all the weights. # Embeddings model.data2vec_text.embeddings.word_embeddings.weight = data2vec_sent_encoder.embed_tokens.weight model.data2vec_text.embeddings.position_embeddings.weight = data2vec_sent_encoder.embed_positions.weight model.data2vec_text.embeddings.token_type_embeddings.weight.data = torch.zeros_like( model.data2vec_text.embeddings.token_type_embeddings.weight ) # just zero them out b/c data2vec doesn't use them. 
model.data2vec_text.embeddings.LayerNorm.weight = data2vec_sent_encoder.layernorm_embedding.weight model.data2vec_text.embeddings.LayerNorm.bias = data2vec_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers): # Encoder: start of layer layer: BertLayer = model.data2vec_text.encoder.layer[i] data2vec_layer: TransformerSentenceEncoderLayer = data2vec_sent_encoder.layers[i] # self attention self_attn: BertSelfAttention = layer.attention.self assert data2vec_layer.self_attn.k_proj.weight.data.shape == torch.Size( (config.hidden_size, config.hidden_size) ), ( "Shape for data2vec_layer.self_attn.k_proj.weight.data should be" f" {torch.Size((config.hidden_size, config.hidden_size))}" ) assert data2vec_layer.self_attn.q_proj.weight.data.shape == torch.Size( (config.hidden_size, config.hidden_size) ), ( "Shape for data2vec_layer.self_attn.q_proj.weight.data should be" f" {torch.Size((config.hidden_size, config.hidden_size))}" ) assert data2vec_layer.self_attn.v_proj.weight.data.shape == torch.Size( (config.hidden_size, config.hidden_size) ), ( "Shape for data2vec_layer.self_attn.v_proj.weight.data should be" f" {torch.Size((config.hidden_size, config.hidden_size))}" ) self_attn.query.weight.data = data2vec_layer.self_attn.q_proj.weight self_attn.query.bias.data = data2vec_layer.self_attn.q_proj.bias self_attn.key.weight.data = data2vec_layer.self_attn.k_proj.weight self_attn.key.bias.data = data2vec_layer.self_attn.k_proj.bias self_attn.value.weight.data = data2vec_layer.self_attn.v_proj.weight self_attn.value.bias.data = data2vec_layer.self_attn.v_proj.bias # self-attention output self_output: BertSelfOutput = layer.attention.output assert ( self_output.dense.weight.shape == data2vec_layer.self_attn.out_proj.weight.shape ), f"Shape for self_output.dense.weight should be {data2vec_layer.self_attn.out_proj.weight.shape}" self_output.dense.weight = data2vec_layer.self_attn.out_proj.weight self_output.dense.bias = data2vec_layer.self_attn.out_proj.bias self_output.LayerNorm.weight = data2vec_layer.self_attn_layer_norm.weight self_output.LayerNorm.bias = data2vec_layer.self_attn_layer_norm.bias # intermediate intermediate: BertIntermediate = layer.intermediate assert ( intermediate.dense.weight.shape == data2vec_layer.fc1.weight.shape ), f"Shape for intermediate.dense.weight should be {data2vec_layer.fc1.weight.shape}" intermediate.dense.weight = data2vec_layer.fc1.weight intermediate.dense.bias = data2vec_layer.fc1.bias # output bert_output: BertOutput = layer.output assert ( bert_output.dense.weight.shape == data2vec_layer.fc2.weight.shape ), f"Shape for bert_output.dense.weight should be {data2vec_layer.fc2.weight.shape}" bert_output.dense.weight = data2vec_layer.fc2.weight bert_output.dense.bias = data2vec_layer.fc2.bias bert_output.LayerNorm.weight = data2vec_layer.final_layer_norm.weight bert_output.LayerNorm.bias = data2vec_layer.final_layer_norm.bias # end of layer if classification_head: model.classifier.dense.weight = data2vec.model.classification_heads["mnli"].dense.weight model.classifier.dense.bias = data2vec.model.classification_heads["mnli"].dense.bias model.classifier.out_proj.weight = data2vec.model.classification_heads["mnli"].out_proj.weight model.classifier.out_proj.bias = data2vec.model.classification_heads["mnli"].out_proj.bias else: # LM Head model.lm_head.dense.weight = data2vec_model.encoder.lm_head.dense.weight model.lm_head.dense.bias = data2vec_model.encoder.lm_head.dense.bias model.lm_head.layer_norm.weight = 
data2vec_model.encoder.lm_head.layer_norm.weight model.lm_head.layer_norm.bias = data2vec_model.encoder.lm_head.layer_norm.bias model.lm_head.decoder.weight = data2vec_model.encoder.lm_head.weight model.lm_head.decoder.bias = data2vec_model.encoder.lm_head.bias # Let's check that we get the same results. input_ids: torch.Tensor = data2vec.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1 our_output = model(input_ids)[0] if classification_head: their_output = data2vec.model.classification_heads["mnli"](data2vec.extract_features(input_ids)) else: their_output = data2vec_model(input_ids)[0] print(our_output.shape, their_output.shape) max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 success = torch.allclose(our_output, their_output, atol=1e-3) print("Do both models output the same tensors?", "🔥" if success else "💩") if not success: raise Exception("Something went wRoNg") pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) args = parser.parse_args() convert_data2vec_checkpoint_to_pytorch( args.checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
transformers/src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3894 }
323
# coding=utf-8 # Copyright 2020 Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model DeBERTa.""" import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as sp from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/deberta-v2-xlarge": 512, "microsoft/deberta-v2-xxlarge": 512, "microsoft/deberta-v2-xlarge-mnli": 512, "microsoft/deberta-v2-xxlarge-mnli": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/deberta-v2-xlarge": {"do_lower_case": False}, "microsoft/deberta-v2-xxlarge": {"do_lower_case": False}, "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False}, "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False}, } VOCAB_FILES_NAMES = {"vocab_file": "spm.model"} class DebertaV2Tokenizer(PreTrainedTokenizer): r""" Constructs a DeBERTa-v2 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. bos_token (`string`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. eos_token (`string`, *optional*, defaults to `"[SEP]"`): The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. 
pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=False, split_by_punct=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.do_lower_case = do_lower_case self.split_by_punct = split_by_punct self.vocab_file = vocab_file self._tokenizer = SPMTokenizer( vocab_file, None, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs ) unk_token = AddedToken(unk_token, normalized=True, special=True) if isinstance(unk_token, str) else unk_token super().__init__( do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self._tokenizer.special_tokens = self.all_special_tokens @property def vocab_size(self): return len(self.vocab) @property def vocab(self): return self._tokenizer.vocab def get_vocab(self): vocab = self.vocab.copy() vocab.update(self.get_added_vocab()) return vocab def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" if self.do_lower_case: text = text.lower() return self._tokenizer.tokenize(text) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self._tokenizer.spm.PieceToId(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self._tokenizer.spm.IdToPiece(index) if index < self.vocab_size else self.unk_token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" return self._tokenizer.decode(tokens) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", False) if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix) class SPMTokenizer: r""" Constructs a tokenizer based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
""" def __init__( self, vocab_file, special_tokens, split_by_punct=False, sp_model_kwargs: Optional[Dict[str, Any]] = None ): self.split_by_punct = split_by_punct self.vocab_file = vocab_file self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs spm = sp.SentencePieceProcessor(**self.sp_model_kwargs) if not os.path.exists(vocab_file): raise FileNotFoundError(f"{vocab_file} does not exist!") spm.load(vocab_file) bpe_vocab_size = spm.GetPieceSize() # Token map # <unk> 0+1 # <s> 1+1 # </s> 2+1 self.vocab = {spm.IdToPiece(i): i for i in range(bpe_vocab_size)} self.ids_to_tokens = [spm.IdToPiece(i) for i in range(bpe_vocab_size)] # self.vocab['[PAD]'] = 0 # self.vocab['[CLS]'] = 1 # self.vocab['[SEP]'] = 2 # self.vocab['[UNK]'] = 3 self.spm = spm self.special_tokens = special_tokens def __getstate__(self): state = self.__dict__.copy() state["spm"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.spm = sp.SentencePieceProcessor(**self.sp_model_kwargs) self.spm.Load(self.vocab_file) def tokenize(self, text): return self._encode_as_pieces(text) def convert_ids_to_tokens(self, ids): tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def decode(self, tokens, start=-1, end=-1, raw_text=None): if raw_text is None: current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.special_tokens: if not prev_is_special: out_string += " " out_string += self.spm.decode_pieces(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.spm.decode_pieces(current_sub_tokens) return out_string.strip() else: words = self.split_to_words(raw_text) word_tokens = [self.tokenize(w) for w in words] token2words = [0] * len(tokens) tid = 0 for i, w in enumerate(word_tokens): for k, t in enumerate(w): token2words[tid] = i tid += 1 word_start = token2words[start] word_end = token2words[end] if end < len(tokens) else len(words) text = "".join(words[word_start:word_end]) return text # TODO add a deprecation cycle as this can have different behaviour from our API def add_special_token(self, token): if token not in self.special_tokens: self.special_tokens.append(token) if token not in self.vocab: self.vocab[token] = len(self.vocab) - 1 self.ids_to_tokens.append(token) return self.id(token) def part_of_whole_word(self, token, is_bos=False): logger.warning_once( "The `DebertaTokenizer.part_of_whole_word` method is deprecated and will be removed in `transformers==4.35`" ) if is_bos: return True if ( len(token) == 1 and (_is_whitespace(list(token)[0]) or _is_control(list(token)[0]) or _is_punctuation(list(token)[0])) ) or token in self.special_tokens: return False word_start = b"\xe2\x96\x81".decode("utf-8") return not token.startswith(word_start) def pad(self): return "[PAD]" def bos(self): return "[CLS]" def eos(self): return "[SEP]" def unk(self): return "[UNK]" def mask(self): return "[MASK]" def sym(self, id): return self.ids_to_tokens[id] def id(self, sym): logger.warning_once( "The `DebertaTokenizer.id` method is deprecated and will be removed in `transformers==4.35`" ) return self.vocab[sym] if sym in self.vocab else 1 def _encode_as_pieces(self, text): text = convert_to_unicode(text) if self.split_by_punct: words = self._run_split_on_punc(text) pieces = 
[self.spm.encode(w, out_type=str) for w in words] return [p for w in pieces for p in w] else: return self.spm.encode(text, out_type=str) def split_to_words(self, text): pieces = self._encode_as_pieces(text) word_start = b"\xe2\x96\x81".decode("utf-8") words = [] offset = 0 prev_end = 0 for i, p in enumerate(pieces): if p.startswith(word_start): if offset > prev_end: words.append(text[prev_end:offset]) prev_end = offset w = p.replace(word_start, "") else: w = p try: s = text.index(w, offset) pn = "" k = i + 1 while k < len(pieces): pn = pieces[k].replace(word_start, "") if len(pn) > 0: break k += 1 if len(pn) > 0 and pn in text[offset:s]: offset = offset + 1 else: offset = s + len(w) except Exception: offset = offset + 1 if prev_end < offset: words.append(text[prev_end:offset]) return words def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def save_pretrained(self, path: str, filename_prefix: str = None): filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]] if filename_prefix is not None: filename = filename_prefix + "-" + filename full_path = os.path.join(path, filename) with open(full_path, "wb") as fs: fs.write(self.spm.serialized_model_proto()) return (full_path,) def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError(f"Unsupported string type: {type(text)}")
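

# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A minimal round trip through the tokenizer, assuming the SentencePiece model for
# "microsoft/deberta-v2-xlarge" (listed in PRETRAINED_VOCAB_FILES_MAP above) can be
# downloaded from the Hub; the input sentences are arbitrary examples.
def _example_deberta_v2_tokenization():
    from transformers import DebertaV2Tokenizer

    tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    encoded = tokenizer("Hello world", "How are you?")
    # Sequence pairs are laid out as [CLS] A [SEP] B [SEP]; token_type_ids mark the
    # second segment with 1s, as described in create_token_type_ids_from_sequences.
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
    print(encoded["token_type_ids"])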
transformers/src/transformers/models/deberta_v2/tokenization_deberta_v2.py/0
{ "file_path": "transformers/src/transformers/models/deberta_v2/tokenization_deberta_v2.py", "repo_id": "transformers", "token_count": 9944 }
324
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for DeiT.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class DeiTImageProcessor(BaseImageProcessor): r""" Constructs a DeiT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`): Size of the image after `resize`. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling` filter, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 256, "width": 256} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after `resize`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be padded with zeros and then cropped do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - `None`: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_center_crop: images = [ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
transformers/src/transformers/models/deit/image_processing_deit.py/0
{ "file_path": "transformers/src/transformers/models/deit/image_processing_deit.py", "repo_id": "transformers", "token_count": 6426 }
325
# coding=utf-8 # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Open-Llama model.""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ....modeling_utils import PreTrainedModel from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_open_llama import OpenLlamaConfig logger = logging.get_logger(__name__) try: from xformers import ops as xops except ImportError: xops = None _CONFIG_FOR_DOC = "OpenLlamaConfig" # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->OpenLlama class OpenLlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ OpenLlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->OpenLlama class OpenLlamaRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. 
self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) # Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->OpenLlama class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding): """OpenLlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) t = t / self.scaling_factor freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) # Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->OpenLlama class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding): """OpenLlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class OpenLlamaMLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, dropout_prob: float, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] self.dropout = nn.Dropout(dropout_prob) def forward(self, x): out = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return self.dropout(out) class OpenLlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: OpenLlamaConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings self.dropout_prob = config.attention_dropout_prob if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self._init_rope() # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->OpenLlama def _init_rope(self): if self.config.rope_scaling is None: self.rotary_emb = OpenLlamaRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) else: scaling_type = self.config.rope_scaling["type"] scaling_factor = self.config.rope_scaling["factor"] if scaling_type == "linear": self.rotary_emb = OpenLlamaLinearScalingRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, ) elif scaling_type == "dynamic": self.rotary_emb = OpenLlamaDynamicNTKScalingRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, ) else: raise ValueError(f"Unknown RoPE scaling type {scaling_type}") def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if 
past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if self.config.use_memory_efficient_attention and xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask(), p=self.dropout_prob ) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) ) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class OpenLlamaDecoderLayer(nn.Module): def __init__(self, config: OpenLlamaConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = OpenLlamaAttention(config=config) self.mlp = OpenLlamaMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, dropout_prob=config.hidden_dropout_prob, ) self.input_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs OPEN_LLAMA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`OpenLlamaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.", OPEN_LLAMA_START_DOCSTRING, ) class OpenLlamaPreTrainedModel(PreTrainedModel): config_class = OpenLlamaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["OpenLlamaDecoderLayer"] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if self.config.use_stable_embedding: torch.nn.init.xavier_normal_(module.weight.data) else: module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() OPEN_LLAMA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.", OPEN_LLAMA_START_DOCSTRING, ) class OpenLlamaModel(OpenLlamaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`OpenLlamaDecoderLayer`] Args: config: OpenLlamaConfig """ def __init__(self, config: OpenLlamaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) if config.use_stable_embedding: self.embed_layer_norm = nn.LayerNorm(config.hidden_size) else: self.embed_layer_norm = None self.layers = nn.ModuleList([OpenLlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.embed_layer_norm: inputs_embeds = self.embed_layer_norm(inputs_embeds) # embed positions if self.config.use_memory_efficient_attention and self.training: attention_mask = None elif attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) input_shape = (batch_size, seq_length) attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, None, output_attentions, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel): def __init__(self, config): super().__init__(config) self.model = OpenLlamaModel(config) if config.shared_input_output_embedding: self.lm_head = None else: self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, 
inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, OpenLlamaForCausalLM >>> model = OpenLlamaForCausalLM.from_pretrained("openlm-research/open_llama_7b") >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.shared_input_output_embedding: logits = torch.einsum( "blh,vh->blv", hidden_states.to(self.model.embed_tokens.weight.device), self.model.embed_tokens.weight ) else: logits = self.lm_head(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] 
position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( """ The LLaMa Model transformer with a sequence classification head on top (linear layer). [`OpenLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, OPEN_LLAMA_START_DOCSTRING, ) class OpenLlamaForSequenceClassification(OpenLlamaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = OpenLlamaModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
transformers/src/transformers/models/deprecated/open_llama/modeling_open_llama.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/open_llama/modeling_open_llama.py", "repo_id": "transformers", "token_count": 19119 }
326
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A TF 2.0 Adaptive Softmax for Transformer XL model. """ import tensorflow as tf from ....modeling_tf_utils import keras from ....tf_utils import shape_list class TFAdaptiveSoftmaxMask(keras.layers.Layer): def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [vocab_size] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters self.keep_order = keep_order self.out_layers = [] self.out_projs = [] def build(self, input_shape): if self.n_clusters > 0: self.cluster_weight = self.add_weight( shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight" ) self.cluster_bias = self.add_weight( shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: weight = self.add_weight( shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}", ) self.out_projs.append(weight) else: self.out_projs.append(None) weight = self.add_weight( shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", ) bias = self.add_weight( shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", ) self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = self.d_embed // (self.div_val**i) weight = self.add_weight( shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}" ) self.out_projs.append(weight) weight = self.add_weight( shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", ) bias = self.add_weight( shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", ) self.out_layers.append((weight, bias)) super().build(input_shape) @staticmethod def _logit(x, W, b, proj=None): y = x if proj is not None: y = tf.einsum("ibd,ed->ibe", y, proj) return tf.einsum("ibd,nd->ibn", y, W) + b @staticmethod def _gather_logprob(logprob, target): lp_size = shape_list(logprob) r = tf.range(lp_size[0], dtype=target.dtype) idx = tf.stack([r, target], 1) return tf.gather_nd(logprob, idx) def call(self, hidden, target, return_mean=True, training=False): head_logprob = 0 if self.n_clusters == 0: output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0]) if target is not None: loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) out = tf.nn.log_softmax(output, axis=-1) else: hidden_sizes = shape_list(hidden) out = [] loss = tf.zeros(hidden_sizes[:2]) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: mask = (target >= l_idx) & (target < r_idx) mask_idx = tf.where(mask) cur_target = tf.boolean_mask(target, mask) - l_idx if self.div_val == 1: cur_W = self.out_layers[0][0][l_idx:r_idx] cur_b = self.out_layers[0][1][l_idx:r_idx] else: cur_W = self.out_layers[i][0] cur_b = self.out_layers[i][1] if i == 0: cur_W = tf.concat([cur_W, self.cluster_weight], 0) cur_b = tf.concat([cur_b, self.cluster_bias], 0) head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0]) head_logprob = tf.nn.log_softmax(head_logit) out.append(head_logprob[..., : self.cutoffs[0]]) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_logprob = self._gather_logprob(cur_head_logprob, cur_target) else: tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i]) tail_logprob = tf.nn.log_softmax(tail_logit) cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(logprob_i) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_tail_logprob = tf.boolean_mask(tail_logprob, mask) cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss)) out = tf.concat(out, axis=-1) if target is not None: if return_mean: loss = tf.reduce_mean(loss) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(loss) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "") return out
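# --- Editor's illustrative sketch (not part of the original modeling_tf_transfo_xl_utilities.py) ---
# A small NumPy walk-through of the adaptive-softmax bookkeeping used by
# TFAdaptiveSoftmaxMask.call above: the head distribution covers the shortlist plus one
# pseudo-token per tail cluster, and a tail token's log-probability is the sum of its
# cluster's head log-prob and its log-prob inside that tail. The vocabulary size,
# cutoffs and random logits below are invented purely for demonstration.
import numpy as np

rng = np.random.default_rng(0)

vocab_size = 12
cutoffs = [4, 8] + [vocab_size]      # shortlist = tokens 0..3, two tail clusters
cutoff_ends = [0] + cutoffs          # [0, 4, 8, 12]
n_clusters = len(cutoffs) - 1        # one head slot per tail cluster

def log_softmax(x):
    x = x - x.max(axis=-1, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=-1, keepdims=True))

# Head distribution: shortlist tokens + cluster pseudo-tokens.
head_logprob = log_softmax(rng.normal(size=cutoffs[0] + n_clusters))

# Log-probability of token 9, which lives in the second tail cluster (bucket i = 2).
i = 2
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]          # 8, 12
tail_logprob = log_softmax(rng.normal(size=r_idx - l_idx))
cluster_prob_idx = cutoffs[0] + i - 1                      # same slot arithmetic as in call()
logprob_token_9 = head_logprob[cluster_prob_idx] + tail_logprob[9 - l_idx]
print(float(logprob_token_9))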
transformers/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py", "repo_id": "transformers", "token_count": 4106 }
327
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Deformable DETR.""" import pathlib from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, validate_annotations, ) from ...utils import ( is_flax_available, is_jax_tensor, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_torchvision_available, is_vision_available, logging, ) from ...utils.generic import TensorType if is_torch_available(): import torch if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_vision_available(): import PIL logger = logging.get_logger(__name__) # pylint: disable=invalid-name SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: input_image (`np.ndarray`): The image to resize. 
size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> List[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
""" input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by DETA. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] # Converting the filtered keypoints list to a numpy array keypoints = np.asarray(keypoints, dtype=np.float32) # Apply the keep mask here to filter the relevant annotations keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ 
Compute the bounding boxes around the provided panoptic segmentation masks. Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True, input_data_format: Union[ChannelDimension, str] = None, ) -> Dict: """ Prepare a coco panoptic annotation for DETA. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. 
""" ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation class DetaImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Dict: """ Prepare an annotation for feeding into DETA model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format, ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): logger.warning_once( "The `prepare` method is deprecated and will be removed in a v4.33. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection def prepare_coco_detection(self, *args, **kwargs): logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. 
") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ") return prepare_coco_panoptic_annotation(*args, **kwargs) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): The desired output size. Can contain keys `shortest_edge` and `longest_edge` or `height` and `width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size( image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format ) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format ) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [ self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images ] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def preprocess( self, images: ImageInput, annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotationFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. annotations (`List[Dict]` or `List[List[Dict]]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. 
image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ if "pad_and_return_pixel_mask" in kwargs: logger.warning_once( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", ) do_pad = kwargs.pop("pad_and_return_pixel_mask") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if not is_batched(images): images = [images] annotations = [annotations] if annotations is not None else None if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if ( masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format, ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image, input_data_format) resized_image = self.resize( image, size=size, resample=resample, input_data_format=input_data_format ) resized_annotation = self.resize_annotation( target, orig_size, get_image_size(resized_image, input_data_format) ) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [ self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images ] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad( images, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format ) else: images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, nms_threshold: float = 0.7, ): """ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. 
If left to None, predictions will not be resized. nms_threshold (`float`, *optional*, defaults to 0.7): NMS threshold. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes batch_size, num_queries, num_labels = out_logits.shape if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device) all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device) all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode="floor") all_labels = all_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for b in range(batch_size): box = boxes[b] score = all_scores[b] lbls = all_labels[b] pre_topk = score.topk(min(10000, num_queries * num_labels)).indices box = box[pre_topk] score = score[pre_topk] lbls = lbls[pre_topk] # apply NMS keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100] score = score[keep_inds] lbls = lbls[keep_inds] box = box[keep_inds] results.append( { "scores": score[score > threshold], "labels": lbls[score > threshold], "boxes": box[score > threshold], } ) return results
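# --- Editor's illustrative sketch (not part of the original image_processing_deta.py) ---
# A minimal example of the index arithmetic used in post_process_object_detection above:
# per-class scores are flattened to a (num_queries * num_labels,) vector, and integer
# division / modulo recover which query (box) and which class each flattened score
# belongs to. Shapes and logits below are invented for demonstration only.
import torch

batch_size, num_queries, num_labels = 1, 3, 4
logits = torch.randn(batch_size, num_queries, num_labels)

prob = logits.sigmoid()
all_scores = prob.view(batch_size, num_queries * num_labels)
all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1)

query_index = torch.div(all_indexes, num_labels, rounding_mode="floor")  # which box
class_index = all_indexes % num_labels                                   # which class

top = all_scores[0].topk(2).indices
print(query_index[0, top], class_index[0, top])  # the two best (box, class) pairs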
transformers/src/transformers/models/deta/image_processing_deta.py/0
{ "file_path": "transformers/src/transformers/models/deta/image_processing_deta.py", "repo_id": "transformers", "token_count": 20208 }
328
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DINOv2 checkpoints from the original repository. URL: https://github.com/facebookresearch/dinov2/tree/main """ import argparse import json from pathlib import Path import requests import torch import torch.nn as nn from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, Dinov2Config, Dinov2ForImageClassification, Dinov2Model from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dinov2_config(model_name, image_classifier=False): config = Dinov2Config(image_size=518, patch_size=14) # size of the architecture if "vits" in model_name: config.hidden_size = 384 config.num_attention_heads = 6 elif "vitb" in model_name: pass elif "vitl" in model_name: config.hidden_size = 1024 config.num_hidden_layers = 24 config.num_attention_heads = 16 elif "vitg" in model_name: config.use_swiglu_ffn = True config.hidden_size = 1536 config.num_hidden_layers = 40 config.num_attention_heads = 24 else: raise ValueError("Model not supported") if image_classifier: repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" config.num_labels = 1000 config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) config.id2label = {int(k): v for k, v in config.id2label.items()} return config def create_rename_keys(config): rename_keys = [] # fmt: off # patch embedding layer rename_keys.append(("cls_token", "embeddings.cls_token")) rename_keys.append(("mask_token", "embeddings.mask_token")) rename_keys.append(("pos_embed", "embeddings.position_embeddings")) rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias")) for i in range(config.num_hidden_layers): # layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias")) # MLP if config.use_swiglu_ffn: rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight")) rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias")) rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight")) rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias")) else: rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", 
f"encoder.layer.{i}.mlp.fc2.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias")) # layerscale rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1")) rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1")) # attention projection layer rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias")) # final layernorm rename_keys.append(("norm.weight", "layernorm.weight")) rename_keys.append(("norm.bias", "layernorm.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @torch.no_grad() def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our DINOv2 structure. 
""" # define default Dinov2 configuration image_classifier = "1layer" in model_name config = get_dinov2_config(model_name, image_classifier=image_classifier) # load original model from torch hub original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", "")) original_model.eval() # load state_dict of original model, remove and rename some keys state_dict = original_model.state_dict() rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config) for key, val in state_dict.copy().items(): val = state_dict.pop(key) if "w12" in key: key = key.replace("w12", "weights_in") if "w3" in key: key = key.replace("w3", "weights_out") state_dict[key] = val # load HuggingFace model if image_classifier: model = Dinov2ForImageClassification(config).eval() model.dinov2.load_state_dict(state_dict) model_name_to_classifier_dict_url = { "dinov2_vits14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth", "dinov2_vitb14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth", "dinov2_vitl14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth", "dinov2_vitg14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth", } url = model_name_to_classifier_dict_url[model_name] classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu") model.classifier.weight = nn.Parameter(classifier_state_dict["weight"]) model.classifier.bias = nn.Parameter(classifier_state_dict["bias"]) else: model = Dinov2Model(config).eval() model.load_state_dict(state_dict) # load image url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") # preprocess image transformations = transforms.Compose( [ transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values std=IMAGENET_DEFAULT_STD, # across a large photo dataset. 
), ] ) original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension processor = BitImageProcessor( size={"shortest_edge": 256}, resample=PILImageResampling.BICUBIC, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, ) pixel_values = processor(image, return_tensors="pt").pixel_values assert torch.allclose(original_pixel_values, pixel_values) with torch.no_grad(): outputs = model(pixel_values, output_hidden_states=True) original_outputs = original_model(pixel_values) # assert values if image_classifier: print("Predicted class:") class_idx = outputs.logits.argmax(-1).item() print(model.config.id2label[class_idx]) else: assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_name_to_hf_name = { "dinov2_vits14": "dinov2-small", "dinov2_vitb14": "dinov2-base", "dinov2_vitl14": "dinov2-large", "dinov2_vitg14": "dinov2-giant", "dinov2_vits14_1layer": "dinov2-small-imagenet1k-1-layer", "dinov2_vitb14_1layer": "dinov2-base-imagenet1k-1-layer", "dinov2_vitl14_1layer": "dinov2-large-imagenet1k-1-layer", "dinov2_vitg14_1layer": "dinov2-giant-imagenet1k-1-layer", } name = model_name_to_hf_name[model_name] model.push_to_hub(f"facebook/{name}") processor.push_to_hub(f"facebook/{name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dinov2_vitb14", type=str, choices=[ "dinov2_vits14", "dinov2_vitb14", "dinov2_vitl14", "dinov2_vitg14", "dinov2_vits14_1layer", "dinov2_vitb14_1layer", "dinov2_vitl14_1layer", "dinov2_vitg14_1layer", ], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/dinov2/convert_dinov2_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/dinov2/convert_dinov2_to_hf.py", "repo_id": "transformers", "token_count": 5255 }
329
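For context, a minimal sketch of how a checkpoint produced by the conversion script above can be loaded back and run. The repository name "facebook/dinov2-base" simply mirrors the model_name_to_hf_name mapping used by --push_to_hub; if the model was only saved locally, the --pytorch_dump_folder_path directory can be passed to from_pretrained instead. This is an illustrative usage example, not part of the conversion script itself.

import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, Dinov2Model

# "facebook/dinov2-base" follows the push_to_hub naming in the script above;
# a local --pytorch_dump_folder_path works the same way.
processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
model = Dinov2Model.from_pretrained("facebook/dinov2-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)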
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Donut Swin Transformer model. This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden states.""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_donut_swin import DonutSwinConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "DonutSwinConfig" # Base docstring _CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base" _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = [ "naver-clova-ix/donut-base", # See all Donut Swin models at https://huggingface.co/models?filter=donut ] @dataclass # Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin class DonutSwinEncoderOutput(ModelOutput): """ DonutSwin encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. 
""" last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass # Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin class DonutSwinModelOutput(ModelOutput): """ DonutSwin model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None # Copied from transformers.models.swin.modeling_swin.window_partition def window_partition(input_feature, window_size): """ Partitions the given input into windows. """ batch_size, height, width, num_channels = input_feature.shape input_feature = input_feature.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.window_reverse def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->DonutSwin class DonutSwinEmbeddings(nn.Module): """ Construct the patch and position embeddings. Optionally, also the mask token. 
""" def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = DonutSwinPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None ) -> Tuple[torch.Tensor]: embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings class DonutSwinPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging class DonutSwinPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`Tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input to be disible by width and height, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # batch_size height/2 width/2 4*num_channels input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.swin.modeling_swin.SwinDropPath class DonutSwinDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->DonutSwin class DonutSwinSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) ) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in DonutSwinModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput class DonutSwinSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->DonutSwin class DonutSwinAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size) self.output = DonutSwinSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, 
head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.swin.modeling_swin.SwinIntermediate class DonutSwinIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinOutput class DonutSwinOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin class DonutSwinLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = DonutSwinIntermediate(config, dim) self.output = DonutSwinOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = 0 self.window_size = min(input_resolution) def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for SW-MSA img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], 
head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs # Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin class DonutSwinStage(nn.Module): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList( [ DonutSwinLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if (i % 2 == 0) else config.window_size // 2, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states 
if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs # Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin class DonutSwinEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_layers = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.layers = nn.ModuleList( [ DonutSwinStage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None, ) for i_layer in range(self.num_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, always_partition: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, DonutSwinEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition, ) else: layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # 
rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return DonutSwinEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->DonutSwin class DonutSwinPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DonutSwinConfig base_model_prefix = "swin" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) SWIN_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SWIN_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DonutImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.", SWIN_START_DOCSTRING, ) class DonutSwinModel(DonutSwinPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token) self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=DonutSwinModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, DonutSwinModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return DonutSwinModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, )
transformers/src/transformers/models/donut/modeling_donut_swin.py/0
{ "file_path": "transformers/src/transformers/models/donut/modeling_donut_swin.py", "repo_id": "transformers", "token_count": 18061 }
330
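As a quick sanity check of the encoder defined above, the following sketch instantiates DonutSwinModel from a default DonutSwinConfig and runs random pixel values through it. The configuration values are library defaults, not those of a released Donut checkpoint such as naver-clova-ix/donut-base.

import torch

from transformers import DonutSwinConfig, DonutSwinModel

config = DonutSwinConfig()             # library defaults; real Donut checkpoints override these values
model = DonutSwinModel(config).eval()

# random input of shape (batch_size, num_channels, height, width)
pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)

with torch.no_grad():
    outputs = model(pixel_values)

# last_hidden_state: (batch_size, sequence_length, num_features)
# pooler_output:     (batch_size, num_features), from the adaptive average pooling head
print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)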
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ELECTRA checkpoint.""" import argparse import torch from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator): # Initialise PyTorch model config = ElectraConfig.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") if discriminator_or_generator == "discriminator": model = ElectraForPreTraining(config) elif discriminator_or_generator == "generator": model = ElectraForMaskedLM(config) else: raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'") # Load weights from tf checkpoint load_tf_weights_in_electra( model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--discriminator_or_generator", default=None, type=str, required=True, help=( "Whether to export the generator or the discriminator. Should be a string, either 'discriminator' or " "'generator'." ), ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator )
transformers/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1018 }
331
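The script above saves a plain state_dict with torch.save, so the result can be loaded back into an ElectraForPreTraining (or ElectraForMaskedLM for the generator) built from the same config. The file paths below are placeholders standing in for whatever --config_file and --pytorch_dump_path were used; they are not files shipped with the repository.

import torch

from transformers import ElectraConfig, ElectraForPreTraining

# Placeholder paths: reuse the --config_file and --pytorch_dump_path given to the script.
config = ElectraConfig.from_json_file("electra_config.json")
model = ElectraForPreTraining(config)

state_dict = torch.load("electra_discriminator.bin", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()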
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available _import_structure = { "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_ernie"] = [ "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", "ErnieForCausalLM", "ErnieForMaskedLM", "ErnieForMultipleChoice", "ErnieForNextSentencePrediction", "ErnieForPreTraining", "ErnieForQuestionAnswering", "ErnieForSequenceClassification", "ErnieForTokenClassification", "ErnieModel", "ErniePreTrainedModel", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/ernie/__init__.py/0
{ "file_path": "transformers/src/transformers/models/ernie/__init__.py", "repo_id": "transformers", "token_count": 927 }
332
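A small illustration of what the lazy-import layout above provides: both the configuration and the PyTorch model classes resolve from the top-level transformers namespace, and modeling_ernie is only imported when a model class is first accessed (and only if torch is available, matching the is_torch_available() guard). The config values below are library defaults rather than those of a specific ERNIE checkpoint.

from transformers import ErnieConfig, ErnieModel  # resolved through the _LazyModule set up above

config = ErnieConfig()          # library defaults; real checkpoints override these values
model = ErnieModel(config)
print(type(model).__name__, model.config.hidden_size)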
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Tuple, overload import torch import torch.types from torch import nn from . import residue_constants as rc from .rigid_utils import Rigid, Rotation from .tensor_utils import batched_gather @overload def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor: ... @overload def pseudo_beta_fn( aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: ... def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): is_gly = aatype == rc.restype_order["G"] ca_idx = rc.atom_order["CA"] cb_idx = rc.atom_order["CB"] pseudo_beta = torch.where( is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, :], ) if all_atom_masks is not None: pseudo_beta_mask = torch.where( is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx], ) return pseudo_beta, pseudo_beta_mask else: return pseudo_beta def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor: atom37_data = batched_gather( atom14, batch["residx_atom37_to_atom14"], dim=-2, no_batch_dims=len(atom14.shape[:-2]), ) atom37_data = atom37_data * batch["atom37_atom_exists"][..., None] return atom37_data def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor: template_aatype = template_feats["template_aatype"] torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"] alt_torsion_angles_sin_cos = template_feats["template_alt_torsion_angles_sin_cos"] torsion_angles_mask = template_feats["template_torsion_angles_mask"] template_angle_feat = torch.cat( [ nn.functional.one_hot(template_aatype, 22), torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14), alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14), torsion_angles_mask, ], dim=-1, ) return template_angle_feat def build_template_pair_feat( batch: Dict[str, torch.Tensor], min_bin: torch.types.Number, max_bin: torch.types.Number, no_bins: int, use_unit_vector: bool = False, eps: float = 1e-20, inf: float = 1e8, ) -> torch.Tensor: template_mask = batch["template_pseudo_beta_mask"] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] # Compute distogram (this seems to differ slightly from Alg. 
5) tpb = batch["template_pseudo_beta"] dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True) lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2 upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1) dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype) to_concat = [dgram, template_mask_2d[..., None]] aatype_one_hot: torch.LongTensor = nn.functional.one_hot( batch["template_aatype"], rc.restype_num + 2, ) n_res = batch["template_aatype"].shape[-1] to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1)) to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1)) n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]] rigids = Rigid.make_transform_from_reference( n_xyz=batch["template_all_atom_positions"][..., n, :], ca_xyz=batch["template_all_atom_positions"][..., ca, :], c_xyz=batch["template_all_atom_positions"][..., c, :], eps=eps, ) points = rigids.get_trans()[..., None, :, :] rigid_vec = rigids[..., None].invert_apply(points) inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1)) t_aa_masks = batch["template_all_atom_mask"] template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] inv_distance_scalar = inv_distance_scalar * template_mask_2d unit_vector = rigid_vec * inv_distance_scalar[..., None] if not use_unit_vector: unit_vector = unit_vector * 0.0 to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1)) to_concat.append(template_mask_2d[..., None]) act = torch.cat(to_concat, dim=-1) act = act * template_mask_2d[..., None] return act def build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor: msa_1hot: torch.LongTensor = nn.functional.one_hot(batch["extra_msa"], 23) msa_feat = [ msa_1hot, batch["extra_has_deletion"].unsqueeze(-1), batch["extra_deletion_value"].unsqueeze(-1), ] return torch.cat(msa_feat, dim=-1) def torsion_angles_to_frames( r: Rigid, alpha: torch.Tensor, aatype: torch.Tensor, rrgdf: torch.Tensor, ) -> Rigid: # [*, N, 8, 4, 4] default_4x4 = rrgdf[aatype, ...] # [*, N, 8] transformations, i.e. # One [*, N, 8, 3, 3] rotation matrix and # One [*, N, 8, 3] translation matrix default_r = r.from_tensor_4x4(default_4x4) bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2)) bb_rot[..., 1] = 1 # [*, N, 8, 2] alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2) # [*, N, 8, 3, 3] # Produces rotation matrices of the form: # [ # [1, 0 , 0 ], # [0, a_2,-a_1], # [0, a_1, a_2] # ] # This follows the original code rather than the supplement, which uses # different indices. 
all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape) all_rots[..., 0, 0] = 1 all_rots[..., 1, 1] = alpha[..., 1] all_rots[..., 1, 2] = -alpha[..., 0] all_rots[..., 2, 1:] = alpha all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None)) chi2_frame_to_frame = all_frames[..., 5] chi3_frame_to_frame = all_frames[..., 6] chi4_frame_to_frame = all_frames[..., 7] chi1_frame_to_bb = all_frames[..., 4] chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame) chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame) chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame) all_frames_to_bb = Rigid.cat( [ all_frames[..., :5], chi2_frame_to_bb.unsqueeze(-1), chi3_frame_to_bb.unsqueeze(-1), chi4_frame_to_bb.unsqueeze(-1), ], dim=-1, ) all_frames_to_global = r[..., None].compose(all_frames_to_bb) return all_frames_to_global def frames_and_literature_positions_to_atom14_pos( r: Rigid, aatype: torch.Tensor, default_frames: torch.Tensor, group_idx: torch.Tensor, atom_mask: torch.Tensor, lit_positions: torch.Tensor, ) -> torch.Tensor: # [*, N, 14] group_mask = group_idx[aatype, ...] # [*, N, 14, 8] group_mask_one_hot: torch.LongTensor = nn.functional.one_hot( group_mask, num_classes=default_frames.shape[-3], ) # [*, N, 14, 8] t_atoms_to_global = r[..., None, :] * group_mask_one_hot # [*, N, 14] t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1)) # [*, N, 14, 1] atom_mask = atom_mask[aatype, ...].unsqueeze(-1) # [*, N, 14, 3] lit_positions = lit_positions[aatype, ...] pred_positions = t_atoms_to_global.apply(lit_positions) pred_positions = pred_positions * atom_mask return pred_positions
transformers/src/transformers/models/esm/openfold_utils/feats.py/0
{ "file_path": "transformers/src/transformers/models/esm/openfold_utils/feats.py", "repo_id": "transformers", "token_count": 3763 }
333
# coding=utf-8 # Copyright 2023 The Espnet authors, IMS Toucan authors, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch FastSpeech2Conformer model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import nn from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, logging, replace_return_docstrings from .configuration_fastspeech2_conformer import ( FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig, ) logger = logging.get_logger(__name__) FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "espnet/fastspeech2_conformer", # See all FastSpeech2Conformer models at https://huggingface.co/models?filter=fastspeech2_conformer ] @dataclass class FastSpeech2ConformerModelOutput(ModelOutput): """ Output type of [`FastSpeech2ConformerModel`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Spectrogram generation loss. spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): The predicted spectrogram. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*): Outputs of the duration predictor. pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*): Outputs of the pitch predictor. energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*): Outputs of the energy predictor. """ loss: Optional[torch.FloatTensor] = None spectrogram: torch.FloatTensor = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None duration_outputs: torch.LongTensor = None pitch_outputs: torch.FloatTensor = None energy_outputs: torch.FloatTensor = None @dataclass class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput): """ Output type of [`FastSpeech2ConformerWithHifiGan`]. Args: waveform (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Speech output as a result of passing the predicted mel spectrogram through the vocoder. loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Spectrogram generation loss. spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): The predicted spectrogram. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*): Outputs of the duration predictor. pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*): Outputs of the pitch predictor. energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*): Outputs of the energy predictor. """ waveform: torch.FloatTensor = None _CONFIG_FOR_DOC = "FastSpeech2ConformerConfig" FASTSPEECH2_CONFORMER_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FastSpeech2ConformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ HIFIGAN_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FastSpeech2ConformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FastSpeech2ConformerWithHifiGanConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ def length_regulator(encoded_embeddings, duration_labels, speaking_speed=1.0): """ Length regulator for feed-forward Transformer. This is the length regulator module described in `FastSpeech: Fast, Robust and Controllable Text to Speech` https://arxiv.org/pdf/1905.09263.pdf. The length regulator expands char or phoneme-level embedding features to frame-level by repeating each feature based on the corresponding predicted durations. 
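    For example, three phoneme embeddings with predicted durations `[2, 1, 3]` are expanded (via
    `torch.repeat_interleave`) into a frame-level sequence of length 6.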
Args: encoded_embeddings (`torch.Tensor` of shape `(batch_size, max_text_length, embedding_dim)`): Batch of sequences of char or phoneme embeddings. duration_labels (`torch.LongTensor` of shape `(batch_size, time)`): Batch of durations of each frame. speaking_speed (`float`, *optional*, defaults to 1.0): Value to control speed of speech. Returns: `torch.Tensor`: Replicated input tensor based on durations (batch_size, time*, embedding_dim). """ if speaking_speed <= 0: raise ValueError("`speaking_speed` must be greater than 0.") elif speaking_speed != 1.0: duration_labels = torch.round(duration_labels.float() * speaking_speed).long() if duration_labels.sum() == 0: duration_labels[duration_labels.sum(dim=1).eq(0)] = 1 # Calculate the maximum length needed max_len = torch.sum(duration_labels, dim=1).max() # Create a padded tensor to hold the results hidden_states = torch.zeros( (encoded_embeddings.size(0), max_len, encoded_embeddings.size(2)), dtype=torch.float, device=encoded_embeddings.device, ) # Loop through the batch and fill in the data for i, (encoded_embedding, target_duration) in enumerate(zip(encoded_embeddings, duration_labels)): repeated = torch.repeat_interleave(encoded_embedding, target_duration, dim=0) hidden_states[i, : repeated.size(0)] = repeated return hidden_states class FastSpeech2ConformerDurationPredictor(nn.Module): """ Duration predictor module. This is a module of duration predictor described in the paper 'FastSpeech: Fast, Robust and Controllable Text to Speech' https://arxiv.org/pdf/1905.09263.pdf The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder. Note: The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`, the outputs are calculated in log domain but in `inference`, those are calculated in linear domain. """ def __init__(self, config: FastSpeech2ConformerConfig): super().__init__() self.conv_layers = nn.ModuleList() self.log_domain_offset = 1.0 for layer_idx in range(config.duration_predictor_layers): num_chans = config.duration_predictor_channels input_channels = config.hidden_size if layer_idx == 0 else num_chans layer = FastSpeech2ConformerPredictorLayer( input_channels, num_chans, config.duration_predictor_kernel_size, config.duration_predictor_dropout_rate, ) self.conv_layers.append(layer) self.linear = nn.Linear(config.duration_predictor_channels, 1) def forward(self, encoder_hidden_states): """ Args: hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`): Batch of input sequences. padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*): Batch of masks indicating padded part. Returns: `torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`. 
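        Note: During training the predictions are returned in the log domain; at inference time they are
        converted to non-negative integer frame counts by exponentiating, subtracting the log-domain
        offset, rounding, and clamping at zero.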
""" # (batch_size, input_dim, max_text_length) hidden_states = encoder_hidden_states.transpose(1, -1) for layer in self.conv_layers: hidden_states = layer(hidden_states) # NOTE: calculate in log domain, (batch_size, max_text_length) hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1) if not self.training: # NOTE: calculate in linear domain hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long() return hidden_states # Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5BatchNormConvLayer class FastSpeech2ConformerBatchNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() if layer_id == 0: in_conv_dim = config.num_mel_bins else: in_conv_dim = config.speech_decoder_postnet_units if layer_id == config.speech_decoder_postnet_layers - 1: out_conv_dim = config.num_mel_bins else: out_conv_dim = config.speech_decoder_postnet_units self.conv = nn.Conv1d( in_conv_dim, out_conv_dim, kernel_size=config.speech_decoder_postnet_kernel, stride=1, padding=(config.speech_decoder_postnet_kernel - 1) // 2, bias=False, ) self.batch_norm = nn.BatchNorm1d(out_conv_dim) if layer_id < config.speech_decoder_postnet_layers - 1: self.activation = nn.Tanh() else: self.activation = None self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.batch_norm(hidden_states) if self.activation is not None: hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module): def __init__(self, config): super().__init__() self.config = config self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor) self.layers = nn.ModuleList( [FastSpeech2ConformerBatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)] ) def forward(self, hidden_states: torch.Tensor): outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins) layer_output = outputs_before_postnet.transpose(1, 2) for layer in self.layers: layer_output = layer(layer_output) outputs_after_postnet = outputs_before_postnet + layer_output.transpose(1, 2) return outputs_before_postnet, outputs_after_postnet class FastSpeech2ConformerPredictorLayer(nn.Module): def __init__(self, input_channels, num_chans, kernel_size, dropout_rate): super().__init__() self.conv = nn.Conv1d( input_channels, num_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, ) self.activation = nn.ReLU() self.layer_norm = nn.LayerNorm(num_chans) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) # Perform layer norm on dimension 1 hidden_states = hidden_states.transpose(1, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(1, -1) hidden_states = self.dropout(hidden_states) return hidden_states class FastSpeech2ConformerVariancePredictor(nn.Module): def __init__( self, config: FastSpeech2ConformerConfig, num_layers=2, num_chans=384, kernel_size=3, dropout_rate=0.5, ): """ Initilize variance predictor module. Args: input_dim (`int`): Input dimension. num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers. num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers. 
kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers. dropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate. """ super().__init__() self.conv_layers = nn.ModuleList() for idx in range(num_layers): input_channels = config.hidden_size if idx == 0 else num_chans layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate) self.conv_layers.append(layer) self.linear = nn.Linear(num_chans, 1) def forward(self, encoder_hidden_states, padding_masks=None): """ Calculate forward propagation. Args: encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`): Batch of input sequences. padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*): Batch of masks indicating padded part. Returns: Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`. """ # (batch_size, input_dim, max_text_length) hidden_states = encoder_hidden_states.transpose(1, -1) for layer in self.conv_layers: hidden_states = layer(hidden_states) hidden_states = self.linear(hidden_states.transpose(1, 2)) if padding_masks is not None: hidden_states = hidden_states.masked_fill(padding_masks, 0.0) return hidden_states class FastSpeech2ConformerVarianceEmbedding(nn.Module): def __init__( self, in_channels=1, out_channels=384, kernel_size=1, padding=0, dropout_rate=0.0, ): super().__init__() self.conv = nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, ) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class FastSpeech2ConformerAttention(nn.Module): """ Multi-Head attention layer with relative position encoding. Details can be found in https://github.com/espnet/espnet/pull/2816. Paper: https://arxiv.org/abs/1901.02860. """ def __init__(self, config: FastSpeech2ConformerConfig, module_config): """Construct an FastSpeech2ConformerAttention object.""" super().__init__() # We assume d_v always equals dim_key self.num_heads = module_config["num_attention_heads"] self.hidden_size = config.hidden_size self.dim_key = self.hidden_size // self.num_heads self.head_dim = self.hidden_size // self.num_heads self.linear_q = nn.Linear(self.hidden_size, self.hidden_size) self.linear_k = nn.Linear(self.hidden_size, self.hidden_size) self.linear_v = nn.Linear(self.hidden_size, self.hidden_size) self.linear_out = nn.Linear(self.hidden_size, self.hidden_size) self.dropout = nn.Dropout(p=module_config["attention_dropout_rate"]) # linear transformation for positional encoding self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://arxiv.org/abs/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim)) def shift_relative_position_tensor(self, pos_tensor): """ Args: pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor. 
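        Returns:
            `torch.Tensor` of shape `(batch_size, head, time1, time1)`: The scores re-indexed so that entry
            `(i, j)` corresponds to the relative offset between query position `i` and key position `j`
            (the Transformer-XL style shift trick), ready to be added to the content-based attention scores.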
""" zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype) pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1) pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2)) # only keep the positions from 0 to time2 pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, : pos_tensor.size(-1) // 2 + 1] return pos_tensor def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, pos_emb: Optional[torch.Tensor] = None, output_attentions: Optional[torch.Tensor] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor. pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. Returns: `torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`. """ bsz, q_len, _ = hidden_states.size() query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim) key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim) value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim) bsz_pos = pos_emb.size(0) pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim) # (batch_size, head, time1, dim_key) query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2) # (batch_size, head, time1, dim_key) query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2) # compute attention score # first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # (batch_size, head, time1, time2) matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1)) # compute matrix b and matrix d # (batch_size, head, time1, 2*time1-1) matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1)) matrix_bd = self.shift_relative_position_tensor(matrix_bd) # (batch_size, head, time1, time2) scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key) # Forward attention if attention_mask is not None: expected_size = (bsz, 1, q_len) if attention_mask.size() != expected_size: raise ValueError(f"Attention mask should be of size {expected_size}, but is {attention_mask.size()}") attention_mask = attention_mask.unsqueeze(1).eq(0) min_value = float(torch.finfo(scores.dtype).min) scores = scores.masked_fill(attention_mask, min_value) attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0) else: attn_weights = torch.softmax(scores, dim=-1) attn_weights = self.dropout(attn_weights) attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2)) attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1) attn_output = self.linear_out(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class FastSpeech2ConformerConvolutionModule(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() # kernel_size should be an odd number for 'SAME' padding channels = config.hidden_size kernel_size = module_config["kernel_size"] self.pointwise_conv1 = 
nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=True) self.depthwise_conv = nn.Conv1d( channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=True ) self.norm = nn.BatchNorm1d(channels) self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, hidden_states): """ Compute convolution module. Args: hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor. Returns: `torch.Tensor`: Output tensor of shape `(batch, time, channels)`. """ # exchange the temporal dimension and the feature dimension hidden_states = hidden_states.transpose(1, 2) # GLU mechanism, (batch_size, 2*channel, dim) hidden_states = self.pointwise_conv1(hidden_states) # (batch_size, channel, dim) hidden_states = nn.functional.glu(hidden_states, dim=1) # 1D Depthwise Conv hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.norm(hidden_states) hidden_states = hidden_states * torch.sigmoid(hidden_states) hidden_states = self.pointwise_conv2(hidden_states) return hidden_states.transpose(1, 2) class FastSpeech2ConformerEncoderLayer(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() # self-attention module definition self.self_attn = FastSpeech2ConformerAttention(config, module_config) # feed-forward module definition self.feed_forward = FastSpeech2ConformerMultiLayeredConv1d(config, module_config) self.macaron_style = config.use_macaron_style_in_conformer if self.macaron_style: self.feed_forward_macaron = FastSpeech2ConformerMultiLayeredConv1d(config, module_config) self.ff_macaron_layer_norm = nn.LayerNorm(config.hidden_size) self.ff_scale = 0.5 else: self.ff_scale = 1.0 # convolution module definition self.use_cnn_module = config.use_cnn_in_conformer if self.use_cnn_module: self.conv_module = FastSpeech2ConformerConvolutionModule(config, module_config) self.conv_layer_norm = nn.LayerNorm(config.hidden_size) self.final_layer_norm = nn.LayerNorm(config.hidden_size) self.ff_layer_norm = nn.LayerNorm(config.hidden_size) self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size) self.dropout = nn.Dropout(module_config["dropout_rate"]) self.size = config.hidden_size self.normalize_before = module_config["normalize_before"] self.concat_after = module_config["concat_after"] if self.concat_after: self.concat_linear = nn.Linear(config.hidden_size + config.hidden_size, config.hidden_size) def forward( self, hidden_states: torch.Tensor, pos_emb: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[torch.Tensor] = False, ): """ Compute encoded features. Args: hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor. pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor. attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. Returns: `torch.Tensor`: Output tensor of shape `(batch, time, size)`. 
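        Note: The sub-modules are applied in order: an optional macaron-style feed-forward (residual scaled
        by `ff_scale`), relative-position self-attention, an optional convolution module, and a final
        feed-forward, each wrapped in a residual connection with pre- or post-layer normalization depending
        on `normalize_before`.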
""" # whether to use macaron style if self.macaron_style: residual = hidden_states if self.normalize_before: hidden_states = self.ff_macaron_layer_norm(hidden_states) hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states)) if not self.normalize_before: hidden_states = self.ff_macaron_layer_norm(hidden_states) # multi-headed self-attention module residual = hidden_states if self.normalize_before: hidden_states = self.self_attn_layer_norm(hidden_states) attention_output, attention_scores = self.self_attn( hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions ) if self.concat_after: x_concat = torch.cat((hidden_states, attention_output), dim=-1) hidden_states = self.concat_linear(x_concat) hidden_states = residual + hidden_states else: hidden_states = self.dropout(attention_output) hidden_states = residual + hidden_states if not self.normalize_before: hidden_states = self.self_attn_layer_norm(hidden_states) # convolution module if self.use_cnn_module: residual = hidden_states if self.normalize_before: hidden_states = self.conv_layer_norm(hidden_states) hidden_states = self.conv_module(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states if not self.normalize_before: hidden_states = self.conv_layer_norm(hidden_states) # feed forward module residual = hidden_states if self.normalize_before: hidden_states = self.ff_layer_norm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = residual + self.ff_scale * hidden_states if not self.normalize_before: hidden_states = self.ff_layer_norm(hidden_states) if self.conv_module is not None: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attention_scores,) return outputs class FastSpeech2ConformerMultiLayeredConv1d(nn.Module): """ Multi-layered conv1d for Transformer block. This is a module of multi-layered conv1d designed to replace positionwise feed-forward network in Transformer block, which is introduced in 'FastSpeech: Fast, Robust and Controllable Text to Speech' https://arxiv.org/pdf/1905.09263.pdf """ def __init__(self, config: FastSpeech2ConformerConfig, module_config): """ Initialize FastSpeech2ConformerMultiLayeredConv1d module. Args: input_channels (`int`): Number of input channels. hidden_channels (`int`): Number of hidden channels. kernel_size (`int`): Kernel size of conv1d. dropout_rate (`float`): Dropout rate. """ super().__init__() input_channels = config.hidden_size hidden_channels = module_config["linear_units"] kernel_size = config.positionwise_conv_kernel_size self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.dropout = nn.Dropout(module_config["dropout_rate"]) def forward(self, hidden_states): """ Calculate forward propagation. Args: hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels). Returns: torch.Tensor: Batch of output tensors (batch_size, time, hidden_channels). 
""" hidden_states = hidden_states.transpose(-1, 1) hidden_states = self.conv1(hidden_states) hidden_states = torch.relu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = hidden_states.transpose(-1, 1) return hidden_states class FastSpeech2ConformerRelPositionalEncoding(nn.Module): """ Args: Relative positional encoding module (new implementation). Details can be found in https://github.com/espnet/espnet/pull/2816. See : Appendix Batch in https://arxiv.org/abs/1901.02860 config (`FastSpeech2ConformerConfig`): FastSpeech2ConformerConfig instance. module_config (`dict`): Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`. """ def __init__(self, config: FastSpeech2ConformerConfig, module_config): """ Construct an PositionalEncoding object. """ super().__init__() self.embed_dim = config.hidden_size self.input_scale = math.sqrt(self.embed_dim) self.dropout = nn.Dropout(p=module_config["positional_dropout_rate"]) self.pos_enc = None self.max_len = 5000 self.extend_pos_enc(torch.tensor(0.0).expand(1, self.max_len)) def extend_pos_enc(self, x): """Reset the positional encodings.""" if self.pos_enc is not None: # self.pos_enc contains both positive and negative parts # the length of self.pos_enc is 2 * input_len - 1 if self.pos_enc.size(1) >= x.size(1) * 2 - 1: if self.pos_enc.dtype != x.dtype or self.pos_enc.device != x.device: self.pos_enc = self.pos_enc.to(dtype=x.dtype, device=x.device) return # Suppose `i` means to the position of query vector and `j` means the # position of key vector. We use position relative positions when keys # are to the left (i>j) and negative relative positions otherwise (i<j). pos_enc_positive = torch.zeros(x.size(1), self.embed_dim) pos_enc_negative = torch.zeros(x.size(1), self.embed_dim) position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1) div_term = torch.exp( torch.arange(0, self.embed_dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.embed_dim) ) pos_enc_positive[:, 0::2] = torch.sin(position * div_term) pos_enc_positive[:, 1::2] = torch.cos(position * div_term) pos_enc_negative[:, 0::2] = torch.sin(-1 * position * div_term) pos_enc_negative[:, 1::2] = torch.cos(-1 * position * div_term) # Reserve the order of positive indices and concat both positive and # negative indices. This is used to support the shifting trick # as in https://arxiv.org/abs/1901.02860 pos_enc_positive = torch.flip(pos_enc_positive, [0]).unsqueeze(0) pos_enc_negative = pos_enc_negative[1:].unsqueeze(0) pos_enc = torch.cat([pos_enc_positive, pos_enc_negative], dim=1) self.pos_enc = pos_enc.to(device=x.device, dtype=x.dtype) def forward(self, feature_representation): """ Args: feature_representation (`torch.Tensor` of shape (batch_size, time, `*`)): Input tensor. Returns: `torch.Tensor`: Encoded tensor (batch_size, time, `*`). """ self.extend_pos_enc(feature_representation) hidden_states = feature_representation * self.input_scale center_idx = self.pos_enc.size(1) // 2 pos_emb = self.pos_enc[:, center_idx - hidden_states.size(1) + 1 : center_idx + hidden_states.size(1)] return self.dropout(hidden_states), self.dropout(pos_emb) class FastSpeech2ConformerEncoder(nn.Module): """ FastSpeech2ConformerEncoder encoder module. Args: config (`FastSpeech2ConformerConfig`): FastSpeech2ConformerConfig instance. module_config (`dict`): Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`. 
use_encoder_input_layer (`bool`, *optional*, defaults to `False`): Input layer type. """ def __init__( self, config: FastSpeech2ConformerConfig, module_config, use_encoder_input_layer=False, ): super().__init__() self.embed = None if use_encoder_input_layer: self.embed = nn.Embedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0 ) self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config) self.conformer_layers = nn.ModuleList( [FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config["layers"])] ) def forward( self, input_tensor: torch.LongTensor, attention_mask: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = False, return_dict: Optional[bool] = None, ): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: `torch.Tensor`: Output tensor of shape `(batch, time, attention_dim)`. 
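        When `return_dict=True`, a [`BaseModelOutput`] with `last_hidden_state`, `hidden_states`, and
        `attentions` is returned instead of a plain tuple.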
""" feature_representation = input_tensor if self.embed is not None: feature_representation = self.embed(feature_representation) hidden_states, pos_emb = self.pos_enc(feature_representation) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for conformer_layer in self.conformer_layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) class FastSpeech2ConformerLoss(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig): super().__init__() use_masking = config.use_masking use_weighted_masking = config.use_weighted_masking if use_masking and use_weighted_masking: raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.") self.use_masking = use_masking self.use_weighted_masking = use_weighted_masking # define criterions reduction = "none" if self.use_weighted_masking else "mean" self.l1_criterion = nn.L1Loss(reduction=reduction) self.mse_criterion = nn.MSELoss(reduction=reduction) self.duration_criterion = nn.MSELoss(reduction=reduction) self.log_domain_offset = 1.0 def forward( self, outputs_after_postnet, outputs_before_postnet, duration_outputs, pitch_outputs, energy_outputs, spectrogram_labels, duration_labels, pitch_labels, energy_labels, duration_mask, spectrogram_mask, ): """ Args: outputs_after_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`): Batch of outputs after postnet. outputs_before_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`): Batch of outputs before postnet. duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length)`): Batch of outputs of duration predictor. pitch_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`): Batch of outputs of pitch predictor. energy_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`): Batch of outputs of energy predictor. spectrogram_labels (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`): Batch of target features. duration_labels (`torch.LongTensor` of shape `(batch_size, max_text_length)`): Batch of durations. pitch_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`): Batch of target token-averaged pitch. energy_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`): Batch of target token-averaged energy. duration_mask (`torch.LongTensor`): Mask used to discern which values the duration loss should be calculated for. spectrogram_mask (`torch.LongTensor`): Mask used to discern which values the spectrogam loss should be calculated for. Returns: `tuple(torch.FloatTensor)`: Tuple of tensors containing, in order, the L1 loss value, duration predictor loss value, pitch predictor loss value, and energy predictor loss value. 
""" pitch_and_energy_masks = duration_mask.unsqueeze(-1) # apply mask to remove padded part if self.use_masking: outputs_before_postnet = outputs_before_postnet.masked_select(spectrogram_mask) if outputs_after_postnet is not None: outputs_after_postnet = outputs_after_postnet.masked_select(spectrogram_mask) spectrogram_labels = spectrogram_labels.masked_select(spectrogram_mask) duration_outputs = duration_outputs.masked_select(duration_mask) duration_labels = duration_labels.masked_select(duration_mask) pitch_outputs = pitch_outputs.masked_select(pitch_and_energy_masks) energy_outputs = energy_outputs.masked_select(pitch_and_energy_masks) pitch_labels = pitch_labels.masked_select(pitch_and_energy_masks) energy_labels = energy_labels.masked_select(pitch_and_energy_masks) # calculate loss l1_loss = self.l1_criterion(outputs_before_postnet, spectrogram_labels) if outputs_after_postnet is not None: l1_loss = l1_loss + self.l1_criterion(outputs_after_postnet, spectrogram_labels) duration_labels = torch.log(duration_labels.float() + self.log_domain_offset) duration_loss = self.duration_criterion(duration_outputs, duration_labels) pitch_loss = self.mse_criterion(pitch_outputs, pitch_labels) energy_loss = self.mse_criterion(energy_outputs, energy_labels) # make weighted mask and apply it if self.use_weighted_masking: spectrogram_mask = nn.functional.pad( spectrogram_mask.transpose(1, 2), [0, spectrogram_labels.size(1) - spectrogram_mask.size(1), 0, 0, 0, 0], value=False, ).transpose(1, 2) out_weights = spectrogram_mask.float() / spectrogram_mask.sum(dim=1, keepdim=True).float() out_weights /= spectrogram_labels.size(0) * spectrogram_labels.size(2) duration_weights = duration_mask.float() / duration_mask.sum(dim=1, keepdim=True).float() duration_weights /= duration_labels.size(0) # apply weight l1_loss = l1_loss.mul(out_weights).masked_select(spectrogram_mask).sum() duration_loss = duration_loss.mul(duration_weights).masked_select(duration_mask).sum() pitch_weights = duration_weights.unsqueeze(-1) pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum() energy_loss = energy_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum() return l1_loss + duration_loss + pitch_loss + energy_loss class FastSpeech2ConformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = FastSpeech2ConformerConfig base_model_prefix = "fastspeech2_conformer" main_input_name = "input_ids" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.LayerNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: key = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-key, b=key) elif isinstance(module, nn.Embedding): module.weight.data.normal_() if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, FastSpeech2ConformerAttention): nn.init.xavier_uniform_(module.pos_bias_u) nn.init.xavier_uniform_(module.pos_bias_v) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, FastSpeech2ConformerEncoder): module.gradient_checkpointing = value @add_start_docstrings( """FastSpeech2Conformer Model.""", FASTSPEECH2_CONFORMER_START_DOCSTRING, ) class FastSpeech2ConformerModel(FastSpeech2ConformerPreTrainedModel): """ FastSpeech 2 module. This is a module of FastSpeech 2 described in 'FastSpeech 2: Fast and High-Quality End-to-End Text to Speech' https://arxiv.org/abs/2006.04558. Instead of quantized pitch and energy, we use token-averaged value introduced in FastPitch: Parallel Text-to-speech with Pitch Prediction. The encoder and decoder are Conformers instead of regular Transformers. """ def __init__(self, config: FastSpeech2ConformerConfig): super().__init__(config) self.config = config # store hyperparameters self.vocab_size = config.vocab_size self.num_mel_bins = config.num_mel_bins self.hidden_size = config.hidden_size self.reduction_factor = config.reduction_factor self.stop_gradient_from_pitch_predictor = config.stop_gradient_from_pitch_predictor self.stop_gradient_from_energy_predictor = config.stop_gradient_from_energy_predictor self.multilingual_model = config.num_languages is not None and config.num_languages > 1 if self.multilingual_model: self.language_id_embedding = torch.nn.Embedding(config.num_languages, self.hidden_size) self.multispeaker_model = config.num_speakers is not None and config.num_speakers > 1 if self.multispeaker_model: self.speaker_id_embedding = torch.nn.Embedding(config.num_speakers, config.hidden_size) self.speaker_embed_dim = config.speaker_embed_dim if self.speaker_embed_dim: self.projection = nn.Linear(config.hidden_size + self.speaker_embed_dim, config.hidden_size) self.encoder = FastSpeech2ConformerEncoder(config, config.encoder_config, use_encoder_input_layer=True) self.duration_predictor = FastSpeech2ConformerDurationPredictor(config) self.pitch_predictor = FastSpeech2ConformerVariancePredictor( config, num_layers=config.pitch_predictor_layers, num_chans=config.pitch_predictor_channels, kernel_size=config.pitch_predictor_kernel_size, dropout_rate=config.pitch_predictor_dropout, ) # continuous pitch + FastPitch style avg self.pitch_embed = FastSpeech2ConformerVarianceEmbedding( out_channels=self.hidden_size, kernel_size=config.pitch_embed_kernel_size, padding=(config.pitch_embed_kernel_size - 1) // 2, dropout_rate=config.pitch_embed_dropout, ) self.energy_predictor = FastSpeech2ConformerVariancePredictor( config, num_layers=config.energy_predictor_layers, num_chans=config.energy_predictor_channels, kernel_size=config.energy_predictor_kernel_size, dropout_rate=config.energy_predictor_dropout, ) # continuous energy + FastPitch style avg self.energy_embed = 
FastSpeech2ConformerVarianceEmbedding( out_channels=self.hidden_size, kernel_size=config.energy_embed_kernel_size, padding=(config.energy_embed_kernel_size - 1) // 2, dropout_rate=config.energy_embed_dropout, ) # The decoder is an encoder self.decoder = FastSpeech2ConformerEncoder(config, config.decoder_config, use_encoder_input_layer=False) self.speech_decoder_postnet = FastSpeech2ConformerSpeechDecoderPostnet(config) self.criterion = FastSpeech2ConformerLoss(config) self.post_init() @replace_return_docstrings(output_type=FastSpeech2ConformerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor] = None, spectrogram_labels: Optional[torch.FloatTensor] = None, duration_labels: Optional[torch.LongTensor] = None, pitch_labels: Optional[torch.FloatTensor] = None, energy_labels: Optional[torch.FloatTensor] = None, speaker_ids: Optional[torch.LongTensor] = None, lang_ids: Optional[torch.LongTensor] = None, speaker_embedding: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> Union[Tuple, FastSpeech2ConformerModelOutput]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Input sequence of text vectors. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**. spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`): Batch of padded target features. duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`): Batch of padded durations. pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`): Batch of padded token-averaged pitch. energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`): Batch of padded token-averaged energy. speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`): Speaker ids used to condition features of speech output by the model. lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`): Language ids used to condition features of speech output by the model. speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`): Embedding containing conditioning signals for the features of the speech. return_dict (`bool`, *optional*, defaults to `None`): Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple. output_attentions (`bool`, *optional*, defaults to `None`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*, defaults to `None`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. Returns: Example: ```python >>> from transformers import ( ... FastSpeech2ConformerTokenizer, ... FastSpeech2ConformerModel, ... FastSpeech2ConformerHifiGan, ... 
) >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer") >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer") >>> output_dict = model(input_ids, return_dict=True) >>> spectrogram = output_dict["spectrogram"] >>> vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan") >>> waveform = vocoder(spectrogram) >>> print(waveform.shape) torch.Size([1, 49664]) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if attention_mask is None: attention_mask = torch.ones(input_ids.shape) has_missing_labels = ( spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None ) if self.training and has_missing_labels: raise ValueError("All labels must be provided to run in training mode.") # forward encoder text_masks = attention_mask.unsqueeze(-2) encoder_outputs = self.encoder( input_ids, text_masks, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) hidden_states = encoder_outputs[0] # Integrate with language id, speaker id, and speaker embedding if self.multispeaker_model and speaker_ids is not None: speaker_id_embeddings = self.speaker_id_embedding(speaker_ids.view(-1)) hidden_states = hidden_states + speaker_id_embeddings.unsqueeze(1) if self.multilingual_model and lang_ids is not None: language_id_embbedings = self.language_id_embedding(lang_ids.view(-1)) hidden_states = hidden_states + language_id_embbedings.unsqueeze(1) if self.speaker_embed_dim is not None and speaker_embedding is not None: embeddings_expanded = ( nn.functional.normalize(speaker_embedding).unsqueeze(1).expand(-1, hidden_states.size(1), -1) ) hidden_states = self.projection(torch.cat([hidden_states, embeddings_expanded], dim=-1)) # forward duration predictor and variance predictors duration_mask = ~attention_mask.bool() if self.stop_gradient_from_pitch_predictor: pitch_predictions = self.pitch_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1)) else: pitch_predictions = self.pitch_predictor(hidden_states, duration_mask.unsqueeze(-1)) if self.stop_gradient_from_energy_predictor: energy_predictions = self.energy_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1)) else: energy_predictions = self.energy_predictor(hidden_states, duration_mask.unsqueeze(-1)) duration_predictions = self.duration_predictor(hidden_states) duration_predictions = duration_predictions.masked_fill(duration_mask, 0.0) if not self.training: # use prediction in inference embedded_pitch_curve = self.pitch_embed(pitch_predictions) embedded_energy_curve = self.energy_embed(energy_predictions) hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve hidden_states = length_regulator(hidden_states, duration_predictions, self.config.speaking_speed) else: # use groundtruth in training embedded_pitch_curve = self.pitch_embed(pitch_labels) embedded_energy_curve = self.energy_embed(energy_labels) hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve hidden_states = length_regulator(hidden_states, duration_labels) # forward decoder if 
not self.training: hidden_mask = None else: spectrogram_mask = (spectrogram_labels != -100).any(dim=-1) spectrogram_mask = spectrogram_mask.int() if self.reduction_factor > 1: length_dim = spectrogram_mask.shape[1] - spectrogram_mask.shape[1] % self.reduction_factor spectrogram_mask = spectrogram_mask[:, :, :length_dim] hidden_mask = spectrogram_mask.unsqueeze(-2) decoder_outputs = self.decoder( hidden_states, hidden_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) outputs_before_postnet, outputs_after_postnet = self.speech_decoder_postnet(decoder_outputs[0]) loss = None if self.training: # calculate loss loss_duration_mask = ~duration_mask loss_spectrogram_mask = spectrogram_mask.unsqueeze(-1).bool() loss = self.criterion( outputs_after_postnet=outputs_after_postnet, outputs_before_postnet=outputs_before_postnet, duration_outputs=duration_predictions, pitch_outputs=pitch_predictions, energy_outputs=energy_predictions, spectrogram_labels=spectrogram_labels, duration_labels=duration_labels, pitch_labels=pitch_labels, energy_labels=energy_labels, duration_mask=loss_duration_mask, spectrogram_mask=loss_spectrogram_mask, ) if not return_dict: postnet_outputs = (outputs_after_postnet,) audio_feature_predictions = ( duration_predictions, pitch_predictions, energy_predictions, ) outputs = postnet_outputs + encoder_outputs + decoder_outputs[1:] + audio_feature_predictions return ((loss,) + outputs) if loss is not None else outputs return FastSpeech2ConformerModelOutput( loss=loss, spectrogram=outputs_after_postnet, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, duration_outputs=duration_predictions, pitch_outputs=pitch_predictions, energy_outputs=energy_predictions, ) # Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock class HifiGanResidualBlock(nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1): super().__init__() self.leaky_relu_slope = leaky_relu_slope self.convs1 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, stride=1, dilation=dilation[i], padding=self.get_padding(kernel_size, dilation[i]), ) for i in range(len(dilation)) ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, stride=1, dilation=1, padding=self.get_padding(kernel_size, 1), ) for _ in range(len(dilation)) ] ) def get_padding(self, kernel_size, dilation=1): return (kernel_size * dilation - dilation) // 2 def apply_weight_norm(self): for layer in self.convs1: nn.utils.weight_norm(layer) for layer in self.convs2: nn.utils.weight_norm(layer) def remove_weight_norm(self): for layer in self.convs1: nn.utils.remove_weight_norm(layer) for layer in self.convs2: nn.utils.remove_weight_norm(layer) def forward(self, hidden_states): for conv1, conv2 in zip(self.convs1, self.convs2): residual = hidden_states hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv1(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv2(hidden_states) hidden_states = hidden_states + residual return hidden_states @add_start_docstrings( """HiFi-GAN vocoder.""", HIFIGAN_START_DOCSTRING, ) # Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5HifiGan with 
SpeechT5->FastSpeech2Conformer class FastSpeech2ConformerHifiGan(PreTrainedModel): config_class = FastSpeech2ConformerHifiGanConfig main_input_name = "spectrogram" def __init__(self, config: FastSpeech2ConformerHifiGanConfig): super().__init__(config) self.num_kernels = len(config.resblock_kernel_sizes) self.num_upsamples = len(config.upsample_rates) self.conv_pre = nn.Conv1d( config.model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3, ) self.upsampler = nn.ModuleList() for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): self.upsampler.append( nn.ConvTranspose1d( config.upsample_initial_channel // (2**i), config.upsample_initial_channel // (2 ** (i + 1)), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2, ) ) self.resblocks = nn.ModuleList() for i in range(len(self.upsampler)): channels = config.upsample_initial_channel // (2 ** (i + 1)) for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3) self.register_buffer("mean", torch.zeros(config.model_in_dim)) self.register_buffer("scale", torch.ones(config.model_in_dim)) # Initialize weights and apply final processing self.post_init() def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def apply_weight_norm(self): nn.utils.weight_norm(self.conv_pre) for layer in self.upsampler: nn.utils.weight_norm(layer) for layer in self.resblocks: layer.apply_weight_norm() nn.utils.weight_norm(self.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv_pre) for layer in self.upsampler: nn.utils.remove_weight_norm(layer) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post) def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor: r""" Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech waveform. Args: spectrogram (`torch.FloatTensor`): Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length, config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`. Returns: `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`. 
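        Note: Each input frame is upsampled by roughly the product of `config.upsample_rates`, so
        `num_frames` is approximately `sequence_length` times that product.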
""" if self.config.normalize_before: spectrogram = (spectrogram - self.mean) / self.scale is_batched = spectrogram.dim() == 3 if not is_batched: spectrogram = spectrogram.unsqueeze(0) hidden_states = spectrogram.transpose(2, 1) hidden_states = self.conv_pre(hidden_states) for i in range(self.num_upsamples): hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope) hidden_states = self.upsampler[i](hidden_states) res_state = self.resblocks[i * self.num_kernels](hidden_states) for j in range(1, self.num_kernels): res_state += self.resblocks[i * self.num_kernels + j](hidden_states) hidden_states = res_state / self.num_kernels hidden_states = nn.functional.leaky_relu(hidden_states) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) if not is_batched: # remove batch dim and collapse tensor to 1-d audio waveform waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1) else: # remove seq-len dim since this collapses to 1 waveform = hidden_states.squeeze(1) return waveform @add_start_docstrings( "The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).", FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING, ) class FastSpeech2ConformerWithHifiGan(PreTrainedModel): config_class = FastSpeech2ConformerWithHifiGanConfig def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig): super().__init__(config) self.model = FastSpeech2ConformerModel(config.model_config) self.vocoder = FastSpeech2ConformerHifiGan(config.vocoder_config) self.config = config @replace_return_docstrings( output_type=FastSpeech2ConformerWithHifiGanOutput, config_class=FastSpeech2ConformerWithHifiGanConfig ) def forward( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor] = None, spectrogram_labels: Optional[torch.FloatTensor] = None, duration_labels: Optional[torch.LongTensor] = None, pitch_labels: Optional[torch.FloatTensor] = None, energy_labels: Optional[torch.FloatTensor] = None, speaker_ids: Optional[torch.LongTensor] = None, lang_ids: Optional[torch.LongTensor] = None, speaker_embedding: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> Union[Tuple, FastSpeech2ConformerModelOutput]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Input sequence of text vectors. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**. spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`): Batch of padded target features. duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`): Batch of padded durations. pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`): Batch of padded token-averaged pitch. energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`): Batch of padded token-averaged energy. speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`): Speaker ids used to condition features of speech output by the model. 
lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`): Language ids used to condition features of speech output by the model. speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`): Embedding containing conditioning signals for the features of the speech. return_dict (`bool`, *optional*, defaults to `None`): Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple. output_attentions (`bool`, *optional*, defaults to `None`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*, defaults to `None`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. Returns: Example: ```python >>> from transformers import ( ... FastSpeech2ConformerTokenizer, ... FastSpeech2ConformerWithHifiGan, ... ) >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer") >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan") >>> output_dict = model(input_ids, return_dict=True) >>> waveform = output_dict["waveform"] >>> print(waveform.shape) torch.Size([1, 49664]) ``` """ return_dict = return_dict if return_dict is not None else self.config.model_config.use_return_dict output_attentions = ( output_attentions if output_attentions is not None else self.config.model_config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.model_config.output_hidden_states ) model_outputs = self.model( input_ids, attention_mask, spectrogram_labels=spectrogram_labels, duration_labels=duration_labels, pitch_labels=pitch_labels, energy_labels=energy_labels, speaker_ids=speaker_ids, lang_ids=lang_ids, speaker_embedding=speaker_embedding, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if not return_dict: has_missing_labels = ( spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None ) if has_missing_labels: spectrogram = model_outputs[0] else: spectrogram = model_outputs[1] else: spectrogram = model_outputs["spectrogram"] waveform = self.vocoder(spectrogram) if not return_dict: return model_outputs + (waveform,) return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs)
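# --- Usage sketch (not part of the library source) ---------------------------
# A minimal, hedged example of driving the two heads above separately instead of
# going through FastSpeech2ConformerWithHifiGan: the acoustic model produces a
# log-mel spectrogram and FastSpeech2ConformerHifiGan converts it to a waveform.
# The vocoder checkpoint name "espnet/fastspeech2_conformer_hifigan" is an
# assumption; substitute whatever HiFi-GAN weights you actually have.

import torch

from transformers import (
    FastSpeech2ConformerHifiGan,
    FastSpeech2ConformerModel,
    FastSpeech2ConformerTokenizer,
)

tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
acoustic_model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan")  # assumed checkpoint name

inputs = tokenizer("Hello, my dog is cute.", return_tensors="pt")
with torch.no_grad():
    # spectrogram: (batch, frames, num_mel_bins); waveform: (batch, num_frames)
    spectrogram = acoustic_model(inputs["input_ids"], return_dict=True).spectrogram
    waveform = vocoder(spectrogram)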
transformers/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py/0
{ "file_path": "transformers/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py", "repo_id": "transformers", "token_count": 33073 }
# coding=utf-8 # Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FNet model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json", # See all FNet models at https://huggingface.co/models?filter=fnet } class FNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FNet [google/fnet-base](https://huggingface.co/google/fnet-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the FNet model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`FNetModel`] or [`TFFNetModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 4): The vocabulary size of the `token_type_ids` passed when calling [`FNetModel`] or [`TFFNetModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_tpu_fourier_optimizations (`bool`, *optional*, defaults to `False`): Determines whether to use TPU optimized FFTs. If `True`, the model will favor axis-wise FFTs transforms. Set to `False` for GPU/CPU hardware, in which case n-dimensional FFTs are used. 
tpu_short_seq_length (`int`, *optional*, defaults to 512): The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT matrix only when *use_tpu_fourier_optimizations* is set to `True` and the input sequence is shorter than or equal to 4096 tokens. Example: ```python >>> from transformers import FNetConfig, FNetModel >>> # Initializing a FNet fnet-base style configuration >>> configuration = FNetConfig() >>> # Initializing a model (with random weights) from the fnet-base style configuration >>> model = FNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "fnet" def __init__( self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations self.tpu_short_seq_length = tpu_short_seq_length
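# --- Usage sketch (not part of the library source) ---------------------------
# A small, hedged example of the two FFT code paths this configuration controls:
# the defaults use n-dimensional FFTs (GPU/CPU), while
# `use_tpu_fourier_optimizations=True` together with `tpu_short_seq_length`
# selects the TPU-friendly axis-wise / DFT-matrix path. The save directory below
# is a hypothetical placeholder.

from transformers import FNetConfig, FNetModel

gpu_config = FNetConfig()  # n-dimensional FFTs, suitable for GPU/CPU
tpu_config = FNetConfig(use_tpu_fourier_optimizations=True, tpu_short_seq_length=512)

model = FNetModel(gpu_config)  # randomly initialized fnet-base style model
gpu_config.save_pretrained("./fnet-config")  # hypothetical output folder, writes config.json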
transformers/src/transformers/models/fnet/configuration_fnet.py/0
{ "file_path": "transformers/src/transformers/models/fnet/configuration_fnet.py", "repo_id": "transformers", "token_count": 2166 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Funnel checkpoint.""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model): # Initialise PyTorch model config = FunnelConfig.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") model = FunnelBaseModel(config) if base_model else FunnelModel(config) # Load weights from tf checkpoint load_tf_weights_in_funnel(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
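# --- Usage sketch (not part of the library source) ---------------------------
# A hedged example of calling the converter defined above directly from Python
# rather than through the argparse CLI. All paths are hypothetical placeholders;
# point them at a real TensorFlow Funnel checkpoint, its config.json, and the
# desired output file.

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/funnel/model.ckpt",  # hypothetical path
    config_file="/path/to/funnel/config.json",  # hypothetical path
    pytorch_dump_path="/path/to/output/pytorch_model.bin",  # hypothetical path
    base_model=True,  # True -> FunnelBaseModel (no decoder); False -> FunnelModel
)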
transformers/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 797 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt_bigcode"] = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
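# --- Usage sketch (not part of the library source) ---------------------------
# The _LazyModule registration above keeps importing the package cheap:
# modeling_gpt_bigcode.py (and its torch dependency) is only loaded when one of
# the exported names is actually accessed. A minimal, hedged illustration,
# assuming torch is installed so the optional-dependency branch above succeeds:

from transformers.models.gpt_bigcode import GPTBigCodeConfig, GPTBigCodeModel  # this access loads the real submodules

config = GPTBigCodeConfig()  # default configuration
model = GPTBigCodeModel(config)  # randomly initialized GPTBigCode model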
transformers/src/transformers/models/gpt_bigcode/__init__.py/0
{ "file_path": "transformers/src/transformers/models/gpt_bigcode/__init__.py", "repo_id": "transformers", "token_count": 792 }
# coding=utf-8 # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Graphormer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { # pcqm4mv1 now deprecated "graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json", # See all Graphormer models at https://huggingface.co/models?filter=graphormer } class GraphormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate an Graphormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Graphormer [graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_classes (`int`, *optional*, defaults to 1): Number of target classes or labels, set to n for binary classification of n tasks. num_atoms (`int`, *optional*, defaults to 512*9): Number of node types in the graphs. num_edges (`int`, *optional*, defaults to 512*3): Number of edges types in the graph. num_in_degree (`int`, *optional*, defaults to 512): Number of in degrees types in the input graphs. num_out_degree (`int`, *optional*, defaults to 512): Number of out degrees types in the input graphs. num_edge_dis (`int`, *optional*, defaults to 128): Number of edge dis in the input graphs. multi_hop_max_dist (`int`, *optional*, defaults to 20): Maximum distance of multi hop edges between two nodes. spatial_pos_max (`int`, *optional*, defaults to 1024): Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and collation. edge_type (`str`, *optional*, defaults to multihop): Type of edge relation chosen. max_nodes (`int`, *optional*, defaults to 512): Maximum number of nodes which can be parsed for the input graphs. share_input_output_embed (`bool`, *optional*, defaults to `False`): Shares the embedding layer between encoder and decoder - careful, True is not implemented. num_layers (`int`, *optional*, defaults to 12): Number of layers. embedding_dim (`int`, *optional*, defaults to 768): Dimension of the embedding layer in encoder. ffn_embedding_dim (`int`, *optional*, defaults to 768): Dimension of the "intermediate" (often named feed-forward) layer in encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads in the encoder. self_attention (`bool`, *optional*, defaults to `True`): Model is self attentive (False not implemented). 
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the attention weights. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the activation of the linear transformer layer. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. bias (`bool`, *optional*, defaults to `True`): Uses bias in the attention module - unsupported at the moment. embed_scale(`float`, *optional*, defaults to None): Scaling factor for the node embeddings. num_trans_layers_to_freeze (`int`, *optional*, defaults to 0): Number of transformer layers to freeze. encoder_normalize_before (`bool`, *optional*, defaults to `False`): Normalize features before encoding the graph. pre_layernorm (`bool`, *optional*, defaults to `False`): Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be used. apply_graphormer_init (`bool`, *optional*, defaults to `False`): Apply a custom graphormer initialisation to the model before training. freeze_embeddings (`bool`, *optional*, defaults to `False`): Freeze the embedding layer, or train it along the model. encoder_normalize_before (`bool`, *optional*, defaults to `False`): Apply the layer norm before each encoder block. q_noise (`float`, *optional*, defaults to 0.0): Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For more detail, see fairseq's documentation on quant_noise). qn_block_size (`int`, *optional*, defaults to 8): Size of the blocks for subsequent quantization with iPQ (see q_noise). kdim (`int`, *optional*, defaults to None): Dimension of the key in the attention, if different from the other values. vdim (`int`, *optional*, defaults to None): Dimension of the value in the attention, if different from the other values. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). traceable (`bool`, *optional*, defaults to `False`): Changes return value of the encoder's inner_state to stacked tensors. 
Example: ```python >>> from transformers import GraphormerForGraphClassification, GraphormerConfig >>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration >>> configuration = GraphormerConfig() >>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration >>> model = GraphormerForGraphClassification(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "graphormer" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, num_classes: int = 1, num_atoms: int = 512 * 9, num_edges: int = 512 * 3, num_in_degree: int = 512, num_out_degree: int = 512, num_spatial: int = 512, num_edge_dis: int = 128, multi_hop_max_dist: int = 5, # sometimes is 20 spatial_pos_max: int = 1024, edge_type: str = "multi_hop", max_nodes: int = 512, share_input_output_embed: bool = False, num_hidden_layers: int = 12, embedding_dim: int = 768, ffn_embedding_dim: int = 768, num_attention_heads: int = 32, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, layerdrop: float = 0.0, encoder_normalize_before: bool = False, pre_layernorm: bool = False, apply_graphormer_init: bool = False, activation_fn: str = "gelu", embed_scale: float = None, freeze_embeddings: bool = False, num_trans_layers_to_freeze: int = 0, traceable: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, kdim: int = None, vdim: int = None, bias: bool = True, self_attention: bool = True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ): self.num_classes = num_classes self.num_atoms = num_atoms self.num_in_degree = num_in_degree self.num_out_degree = num_out_degree self.num_edges = num_edges self.num_spatial = num_spatial self.num_edge_dis = num_edge_dis self.edge_type = edge_type self.multi_hop_max_dist = multi_hop_max_dist self.spatial_pos_max = spatial_pos_max self.max_nodes = max_nodes self.num_hidden_layers = num_hidden_layers self.embedding_dim = embedding_dim self.hidden_size = embedding_dim self.ffn_embedding_dim = ffn_embedding_dim self.num_attention_heads = num_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.encoder_normalize_before = encoder_normalize_before self.pre_layernorm = pre_layernorm self.apply_graphormer_init = apply_graphormer_init self.activation_fn = activation_fn self.embed_scale = embed_scale self.freeze_embeddings = freeze_embeddings self.num_trans_layers_to_freeze = num_trans_layers_to_freeze self.share_input_output_embed = share_input_output_embed self.traceable = traceable self.q_noise = q_noise self.qn_block_size = qn_block_size # These parameters are here for future extensions # atm, the model only supports self attention self.kdim = kdim self.vdim = vdim self.self_attention = self_attention self.bias = bias super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
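# --- Usage sketch (not part of the library source) ---------------------------
# A hedged example of adapting the configuration above: `num_classes` sets the
# number of classification targets, `multi_hop_max_dist` caps the multi-hop edge
# encoding, and `num_hidden_layers` controls depth. The values below are
# arbitrary and purely illustrative.

from transformers import GraphormerConfig, GraphormerForGraphClassification

config = GraphormerConfig(num_classes=3, multi_hop_max_dist=20, num_hidden_layers=6)
model = GraphormerForGraphClassification(config)
print(model.config.num_classes)  # -> 3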
transformers/src/transformers/models/graphormer/configuration_graphormer.py/0
{ "file_path": "transformers/src/transformers/models/graphormer/configuration_graphormer.py", "repo_id": "transformers", "token_count": 4211 }
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow Hubert model.""" from __future__ import annotations import warnings from typing import Any, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput from ...modeling_tf_utils import ( TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_hubert import HubertConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "HubertConfig" TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/hubert-base-ls960", # See all Hubert models at https://huggingface.co/models?filter=hubert ] LARGE_NEGATIVE = -1e8 # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement def _sample_without_replacement(distribution, num_samples): """ Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1)) _, indices = tf.nn.top_k(distribution + z, num_samples) return indices # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices def _scatter_values_on_batch_indices(values, batch_indices, output_shape): """ Scatter function as in PyTorch with indices in format (batch_dim, indixes) """ indices_shape = shape_list(batch_indices) # broadcast batch dim to indices_shape broad_casted_batch_dims = tf.reshape( tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1] ) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, min_masks: int = 0, ) -> tf.Tensor: """ Computes random mask spans for a given shape Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans Adapted from [fairseq's data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376). """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") tf.debugging.assert_less( mask_length, sequence_length, message=( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and" f" `sequence_length`: {sequence_length}`" ), ) # compute number of masked spans in batch num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,)) num_masked_spans = tf.maximum(num_masked_spans, min_masks) num_masked_spans = tf.cast(num_masked_spans, tf.int32) # make sure num masked indices <= sequence_length num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans) num_masked_spans = tf.squeeze(num_masked_spans) # SpecAugment mask to fill spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32) # uniform distribution to sample from, make sure that offset samples are < sequence_length uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1))) # get random indices to mask spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans) # expand masked indices to masked spans spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1) spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length)) spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length)) offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :] offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1)) offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask spec_aug_mask = _scatter_values_on_batch_indices( tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask) ) return spec_aug_mask # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert class TFHubertGroupNorm(keras.layers.Layer): """ From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization """ def __init__( self, groups: int = 32, axis: int = -1, epsilon: float = 1e-3, center: bool = True, scale: bool = True, beta_initializer: keras.initializers.Initializer = "zeros", gamma_initializer: keras.initializers.Initializer = "ones", beta_regularizer: keras.regularizers.Regularizer = None, gamma_regularizer: keras.regularizers.Regularizer = None, beta_constraint: keras.constraints.Constraint = None, gamma_constraint: keras.constraints.Constraint = None, **kwargs, ): super().__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma_constraint = keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super().build(input_shape) def call(self, inputs): input_shape = keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: outputs = tf.reshape(normalized_inputs, tensor_input_shape) else: outputs = normalized_inputs return outputs def get_config(self): config = { "groups": self.groups, "axis": self.axis, "epsilon": self.epsilon, "center": self.center, "scale": self.scale, "beta_initializer": keras.initializers.serialize(self.beta_initializer), "gamma_initializer": keras.initializers.serialize(self.gamma_initializer), "beta_regularizer": keras.regularizers.serialize(self.beta_regularizer), "gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer), "beta_constraint": keras.constraints.serialize(self.beta_constraint), "gamma_constraint": keras.constraints.serialize(self.gamma_constraint), } base_config = super().get_config() return {**base_config, **config} def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(self.axis, self.groups) group_shape = tf.stack(group_shape) reshaped_inputs = tf.reshape(inputs, group_shape) return 
reshaped_inputs, group_shape else: return inputs, group_shape def _apply_normalization(self, reshaped_inputs, input_shape): group_shape = keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(1, len(group_shape))) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: axis = -2 if self.axis == -1 else self.axis - 1 else: axis = -1 if self.axis == -1 else self.axis - 1 group_reduction_axes.pop(axis) mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True) gamma, beta = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization( reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, variance_epsilon=self.epsilon, ) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return gamma, beta def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError( "Axis " + str(self.axis) + " of input tensor should have a defined dimension but the layer received an input with shape " + str(input_shape) + "." ) def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError( "Number of groups (" + str(self.groups) + ") cannot be more than the number of channels (" + str(dim) + ")." ) if dim % self.groups != 0: raise ValueError( "Number of groups (" + str(self.groups) + ") must be a multiple of the number of channels (" + str(dim) + ")." ) def _check_axis(self): if self.axis == 0: raise ValueError( "You are trying to normalize your batch axis. 
Do you want to use tf.layer.batch_normalization instead" ) def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight( shape=shape, name="gamma", initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, ) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight( shape=shape, name="beta", initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, ) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(self.axis, self.groups) else: broadcast_shape[self.axis] = self.groups return broadcast_shape # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert class TFHubertWeightNormConv1D(keras.layers.Conv1D): """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm""" def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs): super().__init__( filters=filters, kernel_size=kernel_size, groups=groups, padding="valid", use_bias=True, bias_initializer="he_normal", **kwargs, ) self.explicit_padding = explicit_padding self.filter_axis = 2 self.kernel_norm_axes = tf.constant([0, 1]) def _init_norm(self): """Set the norm of the weight vector.""" kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes)) self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis]) def _normalize_kernel(self): """Generate normalized weights.""" kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g) self.kernel = tf.transpose(kernel) def build(self, input_shape): if not self.built: super().build(input_shape) self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True) self.weight_v = self.kernel self.weight_g = self.add_weight( name="weight_g", shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1), initializer="ones", dtype=self.weight_v.dtype, trainable=True, ) self._init_norm() self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True) def call(self, inputs): # TODO Matt: Assigning to attributes in call() is deeply sinful in TensorFlow, as it should be idempotent. # This whole layer should be replaced by a layer that doesn't inherit from Conv1D, but instead calls # a functional 1d convolution with normalized weights that it generates (but does not store!) 
self._normalize_kernel() padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0))) output = super().call(padded_inputs) return output # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert class TFHubertNoLayerNormConvLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert class TFHubertLayerNormConvLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.layer_norm = keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.out_conv_dim]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert class TFHubertGroupNormConvLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.activation = get_tf_activation(config.feat_extract_activation) self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 
"conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.out_conv_dim]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert class TFHubertPositionalConvEmbedding(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs: Any) -> None: super().__init__(**kwargs) self.conv = TFHubertWeightNormConv1D( filters=config.hidden_size, kernel_size=config.num_conv_pos_embeddings, groups=config.num_conv_pos_embedding_groups, explicit_padding=config.num_conv_pos_embeddings // 2, name="conv", ) self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings) self.activation = get_tf_activation(config.feat_extract_activation) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.config.hidden_size]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert class TFHubertSamePadLayer(keras.layers.Layer): def __init__(self, num_conv_pos_embeddings, **kwargs): super().__init__(**kwargs) self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def call(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] return hidden_states class TFHubertFeatureEncoder(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs: Any) -> None: super().__init__(**kwargs) if config.feat_extract_norm == "group": conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [ TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}") for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}") for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = conv_layers def call(self, input_values): hidden_states = tf.expand_dims(input_values, -1) for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True for conv_layer in self.conv_layers: with tf.name_scope(conv_layer.name): conv_layer.build(None) class TFHubertFeatureExtractor(TFHubertFeatureEncoder): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. 
" f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) class TFHubertFeatureProjection(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.projection = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="projection", ) self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout) self.config = config def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.conv_dim[-1]]) if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, self.config.conv_dim[-1]]) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert class TFHubertAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert class TFHubertFeedForward(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout) self.intermediate_dense = keras.layers.Dense( units=config.intermediate_size, 
kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="intermediate_dense", ) self.intermediate_act_fn = get_tf_activation(config.hidden_act) self.output_dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="output_dense", ) self.output_dropout = keras.layers.Dropout(config.hidden_dropout) self.config = config def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, training=training) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "intermediate_dense", None) is not None: with tf.name_scope(self.intermediate_dense.name): self.intermediate_dense.build([None, None, self.config.hidden_size]) if getattr(self, "output_dense", None) is not None: with tf.name_scope(self.output_dense.name): self.output_dense.build([None, None, self.config.intermediate_size]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert class TFHubertEncoderLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFHubertAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name="attention", ) self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.feed_forward = TFHubertFeedForward(config, name="feed_forward") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = False, training: bool = False, ) -> Tuple[tf.Tensor]: attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, training=training ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "feed_forward", None) is not None: with tf.name_scope(self.feed_forward.name): self.feed_forward.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert class TFHubertEncoderLayerStableLayerNorm(keras.layers.Layer): 
def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFHubertAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name="attention", ) self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.feed_forward = TFHubertFeedForward(config, name="feed_forward") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = False, training: bool = False, ) -> Tuple[tf.Tensor]: attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, training=training ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "feed_forward", None) is not None: with tf.name_scope(self.feed_forward.name): self.feed_forward.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert class TFHubertEncoder(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, training=training) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) if 
training and (dropout_probability < self.config.layerdrop): # skip the layer continue layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "pos_conv_embed", None) is not None: with tf.name_scope(self.pos_conv_embed.name): self.pos_conv_embed.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert class TFHubertEncoderStableLayerNorm(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer = [ TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, training=training) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) if training and (dropout_probability < self.config.layerdrop): # skip the layer continue layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def 
build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "pos_conv_embed", None) is not None: with tf.name_scope(self.pos_conv_embed.name): self.pos_conv_embed.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFHubertMainLayer(keras.layers.Layer): config_class = HubertConfig def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor") self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection") if config.do_stable_layer_norm: self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder") else: self.encoder = TFHubertEncoder(config, name="encoder") def build(self, input_shape=None): self.masked_spec_embed = self.add_weight( shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed" ) if self.built: return self.built = True if getattr(self, "feature_extractor", None) is not None: with tf.name_scope(self.feature_extractor.name): self.feature_extractor.build(None) if getattr(self, "feature_projection", None) is not None: with tf.name_scope(self.feature_projection.name): self.feature_projection.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None = None): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
""" batch_size, sequence_length, hidden_size = shape_list(hidden_states) # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states = tf.where( tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states, ) elif self.config.mask_time_prob > 0: # generate indices & apply SpecAugment along time axis mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, min_masks=2, ) hidden_states = tf.where( tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states, ) # apply SpecAugment along feature axis if self.config.mask_feature_prob > 0: mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, ) hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0) return hidden_states @unpack_inputs def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: tf.Tensor | None = None, output_hidden_states: tf.Tensor | None = None, return_dict: Optional[bool] = None, training: bool = False, **kwargs: Any, ): hidden_states = self.feature_extractor(tf.cast(input_values, tf.float32), training=training) if attention_mask is not None: # compute real output lengths according to convolution formula output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1)) attention_mask = tf.sequence_mask( output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype ) hidden_states = self.feature_projection(hidden_states, training=training) mask_time_indices = kwargs.get("mask_time_indices", None) if training: hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states,) + encoder_outputs[1:] return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFHubertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = HubertConfig base_model_prefix = "hubert" main_input_name = "input_values" @property def input_signature(self): return { "input_values": tf.TensorSpec((None, 16000), tf.float32, name="input_values"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) logger.warning( f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. 
If you wish " "to train/fine-tune this model, you need a GPU or a TPU" ) HUBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_values` only and nothing else: `model(input_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_values": input_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`HubertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ HUBERT_INPUTS_DOCSTRING = r""" Args: input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. 
[What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_values` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.", HUBERT_START_DOCSTRING, ) class TFHubertModel(TFHubertPreTrainedModel): def __init__(self, config: HubertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.hubert = TFHubertMainLayer(config, name="hubert") @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: """ Returns: Example: ```python >>> from transformers import AutoProcessor, TFHubertModel >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft") >>> model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... 
return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ```""" output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states output_attentions = output_attentions if output_attentions else self.config.output_attentions return_dict = return_dict if return_dict else self.config.return_dict outputs = self.hubert( input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "hubert", None) is not None: with tf.name_scope(self.hubert.name): self.hubert.build(None) @add_start_docstrings( """TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", HUBERT_START_DOCSTRING, ) class TFHubertForCTC(TFHubertPreTrainedModel): def __init__(self, config: HubertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.hubert = TFHubertMainLayer(config, name="hubert") self.dropout = keras.layers.Dropout(config.final_dropout) self.lm_head = keras.layers.Dense(config.vocab_size, name="lm_head") self.output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.hubert.feature_extractor.trainable = False @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_values: tf.Tensor, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, labels: tf.Tensor | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoProcessor, TFHubertForCTC >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft") >>> model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = tf.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # compute loss >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" >>> # Pass the transcription as text to encode labels >>> labels = processor(text=transcription, return_tensors="tf").input_values >>> loss = model(input_values, labels=labels).loss ```""" outputs = self.hubert( input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, training=training) logits = self.lm_head(hidden_states) if labels is not None: if tf.reduce_max(labels) >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") attention_mask = ( attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32) ) input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1)) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = tf.cast(labels >= 0, tf.int32) target_lengths = tf.reduce_sum(labels_mask, axis=-1) loss = tf.nn.ctc_loss( logits=logits, labels=labels, logit_length=input_lengths, label_length=target_lengths, blank_index=self.config.pad_token_id, logits_time_major=False, ) if self.config.ctc_loss_reduction == "sum": loss = tf.reduce_sum(loss) loss = tf.reshape(loss, (1,)) if self.config.ctc_loss_reduction == "mean": loss = tf.reduce_mean(loss) loss = tf.reshape(loss, (1,)) else: loss = None if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "hubert", None) is not None: with tf.name_scope(self.hubert.name): self.hubert.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build([None, None, self.output_hidden_size])
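# A standalone arithmetic sketch (not part of the model code above) of the frame-length
# computation done by `TFHubertMainLayer._get_feat_extract_output_lengths`: each feature
# extractor convolution shrinks the sequence with (input_length - kernel_size) // stride + 1.
# The kernel/stride tuples below are the typical base-config defaults and are an assumption
# for illustration, not values read from any particular checkpoint.
def _sketch_feat_extract_output_length(
    num_samples: int,
    conv_kernel=(10, 3, 3, 3, 3, 2, 2),
    conv_stride=(5, 2, 2, 2, 2, 2, 2),
) -> int:
    length = num_samples
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        # 1D convolution output length (no padding, no dilation), as in `_conv_out_length`
        length = (length - kernel_size) // stride + 1
    return length


if __name__ == "__main__":
    # One second of 16 kHz audio is reduced to 49 encoder frames (~20 ms per frame)
    # with the assumed defaults above.
    print(_sketch_feat_extract_output_length(16000))  # 49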
transformers/src/transformers/models/hubert/modeling_tf_hubert.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ImageGPT.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def squared_euclidean_distance(a, b): b = b.T a2 = np.sum(np.square(a), axis=1) b2 = np.sum(np.square(b), axis=0) ab = np.matmul(a, b) d = a2[:, None] - 2 * ab + b2[None, :] return d def color_quantize(x, clusters): x = x.reshape(-1, 3) d = squared_euclidean_distance(x, clusters) return np.argmin(d, axis=1) class ImageGPTImageProcessor(BaseImageProcessor): r""" Constructs a ImageGPT image processor. This image processor can be used to resize images to a smaller resolution (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values" (color clusters). Args: clusters (`np.ndarray` or `List[List[int]]`, *optional*): The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overriden by `clusters` in `preprocess`. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`): Size of the image after resizing. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in `preprocess`. do_color_quantize (`bool`, *optional*, defaults to `True`): Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`. 
""" model_input_names = ["pixel_values"] def __init__( self, # clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 256, "width": 256} size = get_size_dict(size) self.clusters = np.array(clusters) if clusters is not None else None self.do_resize = do_resize self.size = size self.resample = resample self.do_normalize = do_normalize self.do_color_quantize = do_color_quantize # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Normalizes an images' pixel values to between [-1, 1]. Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format) image = image - 1 return image def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_normalize=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`): Whether to color quantize the image. clusters (`np.ndarray` or `List[List[int]]`, *optional*, defaults to `self.clusters`): Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if `do_color_quantize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. Only has an effect if `do_color_quantize` is set to `False`. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize clusters = clusters if clusters is not None else self.clusters clusters = np.array(clusters) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_normalize: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If you wish to do this, " "make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].", ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_normalize: images = [self.normalize(image=image, input_data_format=input_data_format) for image in images] if do_color_quantize: images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) images = np.array(images) images = color_quantize(images, clusters).reshape(images.shape[:-1]) # flatten to (batch_size, height*width) batch_size = images.shape[0] images = images.reshape(batch_size, -1) # We need to convert back to a list of images to keep consistent behaviour across processors. images = list(images) else: images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"input_ids": images} return BatchFeature(data=data, tensor_type=return_tensors)
transformers/src/transformers/models/imagegpt/image_processing_imagegpt.py
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ KOSMOS-2 model configuration""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/kosmos-2-patch14-224": ( "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/config.json" ), # See all KOSMOS-2 models at https://huggingface.co/models?filter=kosmos-2 } class Kosmos2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65037): Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Kosmos2Model`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). embed_dim (`int`, *optional*, defaults to 2048): Dimensionality of the layers and the pooler layer. layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. ffn_dim (`int`, *optional*, defaults to 8192): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. 
layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(embed_dim). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). ```""" model_type = "kosmos_2_text_model" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "attention_heads", "hidden_size": "embed_dim", "num_hidden_layers": "layers", } def __init__( self, vocab_size=65037, max_position_embeddings=2048, embed_dim=2048, layers=24, ffn_dim=8192, attention_heads=32, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, layer_norm_eps=1e-5, init_std=0.02, scale_embedding=True, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.embed_dim = embed_dim self.layers = layers self.ffn_dim = ffn_dim self.attention_heads = attention_heads self.activation_function = activation_function self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.init_std = init_std self.scale_embedding = scale_embedding self.use_cache = use_cache @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the text config dict if we are loading from Kosmos2Config if config_dict.get("model_type") == "kosmos-2": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class Kosmos2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. 
num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). ```""" model_type = "kosmos_2_vision_model" def __init__( self, hidden_size=1024, intermediate_size=4096, num_hidden_layers=24, num_attention_heads=16, num_channels=3, image_size=224, patch_size=14, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from Kosmos2Config if config_dict.get("model_type") == "kosmos-2": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class Kosmos2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Kosmos2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`]. 
latent_query_num (`int`, *optional*, defaults to 64): The number of latent query tokens that represent the image features used in the text decoder component. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Kosmos2Config, Kosmos2Model >>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration >>> configuration = Kosmos2Config() >>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration >>> model = Kosmos2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "kosmos-2" is_composition = True def __init__( self, text_config=None, vision_config=None, latent_query_num=64, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.") self.text_config = Kosmos2TextConfig(**text_config) self.vision_config = Kosmos2VisionConfig(**vision_config) self.latent_query_num = latent_query_num
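# A minimal usage sketch (not part of the module logic above) showing how the composite
# `Kosmos2Config` wires nested dicts into its `Kosmos2TextConfig` / `Kosmos2VisionConfig`
# sub-configs. The small sizes are arbitrary illustration values, not the settings of the
# pretrained microsoft/kosmos-2-patch14-224 checkpoint.
if __name__ == "__main__":
    toy_config = Kosmos2Config(
        text_config={"layers": 2, "embed_dim": 256, "ffn_dim": 512, "attention_heads": 4},
        vision_config={"num_hidden_layers": 2, "hidden_size": 64, "intermediate_size": 128, "num_attention_heads": 4},
        latent_query_num=16,
    )
    # Nested dicts are converted into full sub-config objects.
    print(type(toy_config.text_config).__name__)       # Kosmos2TextConfig
    print(toy_config.text_config.embed_dim)            # 256
    print(toy_config.vision_config.num_hidden_layers)  # 2
    print(toy_config.latent_query_num)                 # 16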
transformers/src/transformers/models/kosmos2/configuration_kosmos2.py
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for LayoutLMv2.""" import collections import os import sys import unicodedata from typing import Dict, List, Optional, Tuple, Union from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/layoutlmv2-base-uncased": ( "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt" ), "microsoft/layoutlmv2-large-uncased": ( "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/layoutlmv2-base-uncased": 512, "microsoft/layoutlmv2-large-uncased": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True}, "microsoft/layoutlmv2-large-uncased": {"do_lower_case": True}, } LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. 
[What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P")) def subfinder(mylist, pattern): matches = [] indices = [] for idx, i in enumerate(range(len(mylist))): if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern: matches.append(pattern) indices.append(idx) if matches: return matches[0], indices[0] else: return None, 0 class LayoutLMv2Tokenizer(PreTrainedTokenizer): r""" Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token classification). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. [`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, model_max_length: int = 512, additional_special_tokens: Optional[List[str]] = None, **kwargs, ): sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" 
return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... 
list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: 
Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. 
Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING) def encode( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> List[int]: encoded_inputs = self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, 
return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. 
Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None ): if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: error_msg = ( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " ) if truncation_strategy == TruncationStrategy.ONLY_FIRST: error_msg = ( error_msg + "Please select another truncation strategy than " f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." ) logger.error(error_msg) elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: logger.warning( "Be aware, overflowing tokens are not returned for the setting you have chosen," f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " "truncation strategy. So the returned list will always be empty even if some " "tokens have been removed." 
) for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
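# Illustrative padding sketch (assumes a BERT-style vocab where pad_token_id == 0, the default
# pad_token_box=[0, 0, 0, 0] and pad_token_label=-100, padding_side="right" and a difference of 2):
#   input_ids:      [101, 7592, 102]                             -> [101, 7592, 102, 0, 0]
#   bbox:           [[0, 0, 0, 0], b1, [1000, 1000, 1000, 1000]] -> same + [[0, 0, 0, 0], [0, 0, 0, 0]]
#   labels:         [-100, 5, -100]                              -> [-100, 5, -100, -100, -100]
#   attention_mask: [1, 1, 1]                                    -> [1, 1, 1, 0, 0]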
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. 
Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
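# Illustrative behaviour of the two helpers above (assuming the default settings and a vocabulary
# that contains "un", "##aff" and "##able"):
#   BasicTokenizer().tokenize("HeLLo, unaffable!")                     -> ["hello", ",", "unaffable", "!"]
#   WordpieceTokenizer(vocab, unk_token="[UNK]").tokenize("unaffable") -> ["un", "##aff", "##able"]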
transformers/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py", "repo_id": "transformers", "token_count": 33477 }
342
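A minimal end-to-end sketch of the tokenizer defined above. The checkpoint name, words, boxes and labels are illustrative assumptions; boxes are expected on the 0-1000 normalized scale, and labels follow the `only_label_first_subword` convention described in `prepare_for_model`.

```python
from transformers import LayoutLMv2Tokenizer

# The checkpoint name is an assumption; any LayoutLMv2 vocabulary behaves the same way.
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")

words = ["hello", "world"]
boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # word-level boxes, 0-1000 scale
word_labels = [1, 2]

encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, return_tensors="pt")

print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'labels', 'token_type_ids']
print(encoding["bbox"][0, 0])   # the [CLS] token gets cls_token_box, i.e. [0, 0, 0, 0]
print(encoding["labels"][0])    # word labels on first subwords, -100 on the rest and on special tokens
```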
# coding=utf-8 # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LED model configuration""" from typing import List, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) LED_PRETRAINED_CONFIG_ARCHIVE_MAP = { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/config.json", # See all LED models at https://huggingface.co/models?filter=led } class LEDConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LED [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`LEDModel`] or [`TFLEDModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the classifier. max_encoder_position_embeddings (`int`, *optional*, defaults to 16384): The maximum sequence length that the encoder might ever be used with. 
max_decoder_position_embeddings (`int`, *optional*, defaults to 16384): The maximum sequence length that the decoder might ever be used with. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) Example: ```python >>> from transformers import LEDModel, LEDConfig >>> # Initializing a LED allenai/led-base-16384 style configuration >>> configuration = LEDConfig() >>> # Initializing a model from the allenai/led-base-16384 style configuration >>> model = LEDModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "led" attribute_map = { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", "attention_probs_dropout_prob": "attention_dropout", "initializer_range": "init_std", } def __init__( self, vocab_size=50265, max_encoder_position_embeddings=16384, max_decoder_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, attention_window: Union[List[int], int] = 512, **kwargs, ): self.vocab_size = vocab_size self.max_encoder_position_embeddings = max_encoder_position_embeddings self.max_decoder_position_embeddings = max_decoder_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.attention_window = attention_window super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
transformers/src/transformers/models/led/configuration_led.py/0
{ "file_path": "transformers/src/transformers/models/led/configuration_led.py", "repo_id": "transformers", "token_count": 2973 }
343
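As a complement to the docstring example above, a small sketch of the `attribute_map` aliases and a per-layer `attention_window`; the sizes below are made up for illustration.

```python
from transformers import LEDConfig

# Deliberately small, illustrative sizes (the defaults mirror allenai/led-base-16384).
config = LEDConfig(
    d_model=512,
    encoder_layers=4,
    decoder_layers=4,
    attention_window=[256, 256, 512, 512],  # one local-attention window size per encoder layer
)

print(config.hidden_size)          # 512 -- alias of d_model via attribute_map
print(config.num_attention_heads)  # 16  -- alias of encoder_attention_heads (default value)
print(config.num_hidden_layers)    # 4   -- set from encoder_layers in __init__
print(config.attention_window)     # [256, 256, 512, 512]
```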
# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) LlamaTokenizerFast = None """ Sample usage: ``` python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import LlamaForCausalLM, LlamaTokenizer model = LlamaForCausalLM.from_pretrained("/output/path") tokenizer = LlamaTokenizer.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). """ NUM_SHARDS = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "34B": 4, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def write_model(model_path, input_base_path, model_size, tokenizer_path=None, safe_serialization=True): # for backward compatibility, before you needed the repo to be called `my_repo/model_size` if not os.path.isfile(os.path.join(input_base_path, "params.json")): input_base_path = os.path.join(input_base_path, model_size) os.makedirs(model_path, exist_ok=True) tmp_model_path = os.path.join(model_path, "tmp") os.makedirs(tmp_model_path, exist_ok=True) params = read_json(os.path.join(input_base_path, "params.json")) num_shards = NUM_SHARDS[model_size] params = params.get("model", params) n_layers = params["n_layers"] n_heads = params["n_heads"] n_heads_per_shard = n_heads // num_shards dim = params["dim"] dims_per_head = dim // n_heads base = params.get("rope_theta", 10000.0) inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) if base > 10000.0: max_position_embeddings = 16384 else: max_position_embeddings = 2048 tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast if tokenizer_path is not None: tokenizer = tokenizer_class(tokenizer_path) tokenizer.save_pretrained(model_path) vocab_size = tokenizer.vocab_size if tokenizer_path is not None else 32000 if params.get("n_kv_heads", None) is not None: num_key_value_heads = params["n_kv_heads"] # for GQA / MQA num_local_key_value_heads = n_heads_per_shard // num_key_value_heads key_value_dim 
= dim // num_key_value_heads else: # compatibility with other checkpoints num_key_value_heads = n_heads num_local_key_value_heads = n_heads_per_shard key_value_dim = dim # permute for sliced rotary def permute(w, n_heads=n_heads, dim1=dim, dim2=dim): return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) print(f"Fetching all parameters from the checkpoint at {input_base_path}.") # Load weights if num_shards == 1: # Not sharded # (The sharded implementation would also work, but this is simpler.) loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu") else: # Sharded loaded = [ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu") for i in range(num_shards) ] param_count = 0 index_dict = {"weight_map": {}} for layer_i in range(n_layers): filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" if num_shards == 1: # Unsharded state_dict = { f"model.layers.{layer_i}.self_attn.q_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wq.weight"] ), f"model.layers.{layer_i}.self_attn.k_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wk.weight"] ), f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"], f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"], f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"], f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"], f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"], f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"], f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
state_dict = { f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ f"layers.{layer_i}.attention_norm.weight" ].clone(), f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ f"layers.{layer_i}.ffn_norm.weight" ].clone(), } state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards) ], dim=0, ).reshape(dim, dim) ) state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, ) state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim) state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" if num_shards == 1: # Unsharded state_dict = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: state_dict = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0), } for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) # Write configs index_dict["metadata"] = {"total_size": param_count * 2} write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 multiple_of = params["multiple_of"] if "multiple_of" in params else 256 config = LlamaConfig( hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, vocab_size=vocab_size, rope_theta=base, max_position_embeddings=max_position_embeddings, ) config.save_pretrained(tmp_model_path) # Make space so we can load the model properly now. 
del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model.") model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True) # Avoid saving this as part of the config. del model.config._name_or_path model.config.torch_dtype = torch.float16 print("Saving in the Transformers format.") model.save_pretrained(model_path, safe_serialization=safe_serialization) shutil.rmtree(tmp_model_path) def write_tokenizer(tokenizer_path, input_tokenizer_path): # Initialize the tokenizer based on the `spm` model tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.") tokenizer = tokenizer_class(input_tokenizer_path) tokenizer.save_pretrained(tokenizer_path) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", ) parser.add_argument( "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "34B", "65B", "70B", "70Bf", "tokenizer_only"], help="'f' models correspond to the finetuned versions, and are specific to the Llama2 official release. For more details on Llama2, checkout the original repo: https://huggingface.co/meta-llama", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") args = parser.parse_args() spm_path = os.path.join(args.input_dir, "tokenizer.model") if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir, input_base_path=args.input_dir, model_size=args.model_size, safe_serialization=args.safe_serialization, tokenizer_path=spm_path, ) else: write_tokenizer(args.output_dir, spm_path) if __name__ == "__main__": main()
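# ---------------------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original script): what the `permute` helper
# above does to a query/key projection matrix. The original Llama checkpoints apply rotary
# embeddings to interleaved channel pairs, while the Transformers implementation rotates the
# first and second halves of each head, so `permute` reorders the projection rows accordingly.
# `_example_permute` is a local copy of the helper with toy sizes (2 heads, head_dim 4) chosen
# only for readability; the underscored names are illustrative and not used by the script.
import torch


def _example_permute(w, n_heads=2, dim1=8, dim2=8):
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)


_w = torch.arange(8.0).repeat_interleave(8).reshape(8, 8)  # row i is filled with the value i
# Within each head, rows (0, 1, 2, 3) come out as (0, 2, 1, 3): even-indexed channels first,
# then odd-indexed ones.
assert _example_permute(_w)[:, 0].tolist() == [0.0, 2.0, 1.0, 3.0, 4.0, 6.0, 5.0, 7.0]
# For reference, `compute_intermediate_size(4096)` above evaluates to 11008, which matches the
# feed-forward width of the 7B model.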
transformers/src/transformers/models/llama/convert_llama_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/llama/convert_llama_weights_to_hf.py", "repo_id": "transformers", "token_count": 6089 }
344
# coding=utf-8 # Copyright 2021, The Microsoft Research Asia MarkupLM Team authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MarkupLM model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", } class MarkupLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MarkupLM [microsoft/markuplm-base](https://huggingface.co/microsoft/markuplm-base) architecture. Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the documentation from [`BertConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the MarkupLM model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`MarkupLMModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024): The maximum value that the tree id unit embedding might ever use. Typically set this to something large just in case (e.g., 1024). max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256): The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large just in case (e.g., 256). max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024): The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something large just in case (e.g., 1024). tag_pad_id (`int`, *optional*, defaults to 216): The id of the padding token in the xpath tags. subs_pad_id (`int`, *optional*, defaults to 1001): The id of the padding token in the xpath subscripts. xpath_tag_unit_hidden_size (`int`, *optional*, defaults to 32): The hidden size of each tree id unit. One complete tree index will have (50*xpath_tag_unit_hidden_size)-dim. max_depth (`int`, *optional*, defaults to 50): The maximum depth in xpath. Examples: ```python >>> from transformers import MarkupLMModel, MarkupLMConfig >>> # Initializing a MarkupLM microsoft/markuplm-base style configuration >>> configuration = MarkupLMConfig() >>> # Initializing a model from the microsoft/markuplm-base style configuration >>> model = MarkupLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "markuplm" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout # additional properties self.max_depth = max_depth self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings self.tag_pad_id = tag_pad_id self.subs_pad_id = subs_pad_id self.xpath_unit_hidden_size = xpath_unit_hidden_size
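# Illustrative note (added, not part of the original file): how the xpath-related defaults above
# fit together. Each DOM node's xpath is encoded as two integer sequences of length `max_depth`
# (50), one of tag-name ids and one of subscripts, padded with `tag_pad_id` (216) and
# `subs_pad_id` (1001) respectively. Each of the 50 units is embedded into
# `xpath_unit_hidden_size` (32) dimensions before being combined and projected to `hidden_size`.
# A quick sanity check with the default values (underscored name is illustrative only):
_config = MarkupLMConfig()
assert _config.max_depth * _config.xpath_unit_hidden_size == 1600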
transformers/src/transformers/models/markuplm/configuration_markuplm.py/0
{ "file_path": "transformers/src/transformers/models/markuplm/configuration_markuplm.py", "repo_id": "transformers", "token_count": 2942 }
345
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MaskFormer checkpoints with Swin backbone from the original repository. URL: https://github.com/facebookresearch/MaskFormer""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_maskformer_config(model_name: str): backbone_config = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] ) config = MaskFormerConfig(backbone_config=backbone_config) repo_id = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok config.num_labels = 847 filename = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok config.num_labels = 150 filename = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok config.num_labels = 171 filename = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO config.num_labels = 133 filename = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok config.num_labels = 19 filename = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok config.num_labels = 65 filename = "mapillary-vistas-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} return config def create_rename_keys(config): rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias")) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight")) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table")) 
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight")) rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias")) if i < 3: rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight")) rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight")) rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias")) rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight")) rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias")) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight")) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight")) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias")) for source_index, target_index in zip(range(3, 0, -1), range(0, 3)): rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight")) rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight")) rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias")) rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight")) rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight")) rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias")) 
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight")) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias")) # Transformer decoder for idx in range(config.decoder_config.decoder_layers): # self-attention out projection rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias")) # cross-attention out projection rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias")) # MLP 1 rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias")) # MLP 2 rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias")) # layernorm 1 (self-attention layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias")) # layernorm 2 (cross-attention layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias")) # layernorm 3 (final layernorm) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight")) rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias")) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight")) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias")) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight")) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight")) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias")) 
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight")) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias")) for i in range(3): rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight")) rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # we split up the matrix of each encoder layer into queries, keys and values def read_in_swin_q_k_v(state_dict, backbone_config): num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): dim = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :] state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim] state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[ dim : dim * 2, : ] state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[ dim : dim * 2 ] state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[ -dim :, : ] state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :] # fmt: on # we split up the matrix of each encoder layer into queries, keys and values def read_in_decoder_q_k_v(state_dict, config): # fmt: off hidden_size = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :] state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size] state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :] state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :] state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this 
is a single matrix + bias) in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :] state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size] state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :] state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2] state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :] state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :] # fmt: on # We will verify our results on an image of cute cats def prepare_img() -> torch.Tensor: url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_maskformer_checkpoint( model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False ): """ Copy/paste/tweak model's weights to our MaskFormer structure. """ config = get_maskformer_config(model_name) # load original state_dict with open(checkpoint_path, "rb") as f: data = pickle.load(f) state_dict = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_swin_q_k_v(state_dict, config.backbone_config) read_in_decoder_q_k_v(state_dict, config) # update to torch tensors for key, value in state_dict.items(): state_dict[key] = torch.from_numpy(value) # load 🤗 model model = MaskFormerForInstanceSegmentation(config) model.eval() for name, param in model.named_parameters(): print(name, param.shape) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}" # verify results image = prepare_img() if "vistas" in model_name: ignore_index = 65 elif "cityscapes" in model_name: ignore_index = 65535 else: ignore_index = 255 reduce_labels = True if "ade" in model_name else False image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels) inputs = image_processor(image, return_tensors="pt") outputs = model(**inputs) print("Logits:", outputs.class_queries_logits[0, :3, :3]) if model_name == "maskformer-swin-tiny-ade": expected_logits = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: 
print("Pushing model and image processor to the hub...") model.push_to_hub(f"nielsr/{model_name}") image_processor.push_to_hub(f"nielsr/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
transformers/src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py", "repo_id": "transformers", "token_count": 8473 }
346
# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_mistral": ["MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MistralConfig"],
}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mistral"] = [
        "MistralForCausalLM",
        "MistralModel",
        "MistralPreTrainedModel",
        "MistralForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mistral"] = [
        "FlaxMistralForCausalLM",
        "FlaxMistralModel",
        "FlaxMistralPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mistral import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MistralConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mistral import (
            MistralForCausalLM,
            MistralForSequenceClassification,
            MistralModel,
            MistralPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mistral import (
            FlaxMistralForCausalLM,
            FlaxMistralModel,
            FlaxMistralPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
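# Usage note (added for illustration, not part of the original file): `_LazyModule` defers the
# actual imports, so for example
#
#     from transformers.models.mistral import MistralConfig  # lightweight, no torch needed yet
#     from transformers.models.mistral import MistralModel   # loads the torch-backed module on first access
#
# only pulls in the heavy torch/flax code when one of the corresponding names is first used. If a
# backend is missing, its classes are simply not registered here, and the top-level `transformers`
# package exposes dummy objects that raise an informative error about the missing dependency.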
transformers/src/transformers/models/mistral/__init__.py/0
{ "file_path": "transformers/src/transformers/models/mistral/__init__.py", "repo_id": "transformers", "token_count": 935 }
347
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 MobileBERT model.""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFNextSentencePredictorOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFNextSentencePredictionLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_mobilebert import MobileBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased" _CONFIG_FOR_DOC = "MobileBertConfig" # TokenClassification docstring _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "vumichien/mobilebert-finetuned-ner" _TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']" _TOKEN_CLASS_EXPECTED_LOSS = 0.03 # QuestionAnswering docstring _CHECKPOINT_FOR_QA = "vumichien/mobilebert-uncased-squad-v2" _QA_EXPECTED_OUTPUT = "'a nice puppet'" _QA_EXPECTED_LOSS = 3.98 _QA_TARGET_START_INDEX = 12 _QA_TARGET_END_INDEX = 13 # SequenceClassification docstring _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "vumichien/emo-mobilebert" _SEQ_CLASS_EXPECTED_OUTPUT = "'others'" _SEQ_CLASS_EXPECTED_LOSS = "4.72" TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/mobilebert-uncased", # See all MobileBERT models at https://huggingface.co/models?filter=mobilebert ] # Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss class TFMobileBertPreTrainingLoss: """ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. 
""" def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) # make sure only labels that are not equal to -100 # are taken into account for the loss computation lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1]) ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype) masked_ns_loss = unmasked_ns_loss * ns_loss_mask reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,)) class TFMobileBertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.intermediate_size, name="dense") if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.true_hidden_size]) class TFLayerNorm(keras.layers.LayerNormalization): def __init__(self, feat_size, *args, **kwargs): self.feat_size = feat_size super().__init__(*args, **kwargs) def build(self, input_shape=None): super().build([None, None, self.feat_size]) class TFNoNorm(keras.layers.Layer): def __init__(self, feat_size, epsilon=None, **kwargs): super().__init__(**kwargs) self.feat_size = feat_size def build(self, input_shape): self.bias = self.add_weight("bias", shape=[self.feat_size], initializer="zeros") self.weight = self.add_weight("weight", shape=[self.feat_size], initializer="ones") super().build(input_shape) def call(self, inputs: tf.Tensor): return inputs * self.weight + self.bias NORM2FN = {"layer_norm": TFLayerNorm, "no_norm": TFNoNorm} class TFMobileBertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.trigram_input = config.trigram_input self.embedding_size = config.embedding_size self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.embedding_transformation = keras.layers.Dense(config.hidden_size, name="embedding_transformation") # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = NORM2FN[config.normalization_type]( config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.dropout = 
keras.layers.Dropout(rate=config.hidden_dropout_prob) self.embedded_input_size = self.embedding_size * (3 if self.trigram_input else 1) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) if self.built: return self.built = True if getattr(self, "embedding_transformation", None) is not None: with tf.name_scope(self.embedding_transformation.name): self.embedding_transformation.build([None, None, self.embedded_input_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if self.trigram_input: # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited # Devices (https://arxiv.org/abs/2004.02984) # # The embedding table in BERT models accounts for a substantial proportion of model size. To compress # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT. # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512 # dimensional output. 
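            # (Added note: in this TF port the trigram "convolution" described above is realized just
            # below as a concatenation of each token's embedding with its left and right neighbours,
            # giving 3 * embedding_size = 384 features with the default embedding_size of 128, which the
            # dense `embedding_transformation` then projects to hidden_size (512).)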
inputs_embeds = tf.concat( [ tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))), inputs_embeds, tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0))), ], axis=2, ) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFMobileBertSelfAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.output_attentions = config.output_attentions assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.config = config def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call( self, query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=False ): batch_size = shape_list(attention_mask)[0] mixed_query_layer = self.query(query_tensor) mixed_key_layer = self.key(key_tensor) mixed_value_layer = self.value(value_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFMobileBertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
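        # (Added note: `stable_softmax` from `transformers.tf_utils` is a thin wrapper around
        # `tf.nn.softmax` used across the TF models to work around an XLA compilation issue;
        # numerically it behaves like a regular softmax over the last axis here.)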
attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.true_hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.true_hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build( [ None, None, self.config.true_hidden_size if self.config.use_bottleneck_attention else self.config.hidden_size, ] ) class TFMobileBertSelfOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.use_bottleneck = config.use_bottleneck self.dense = keras.layers.Dense( config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = NORM2FN[config.normalization_type]( config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) if not self.use_bottleneck: self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, residual_tensor, training=False): hidden_states = self.dense(hidden_states) if not self.use_bottleneck: hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + residual_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.true_hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFMobileBertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFMobileBertSelfAttention(config, name="self") self.mobilebert_output = TFMobileBertSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions, training=False, ): self_outputs = self.self( query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=training ) attention_output = self.mobilebert_output(self_outputs[0], layer_input, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "mobilebert_output", None) is not None: with tf.name_scope(self.mobilebert_output.name): self.mobilebert_output.build(None) class 
TFOutputBottleneck(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, name="dense") self.LayerNorm = NORM2FN[config.normalization_type]( config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, residual_tensor, training=False): layer_outputs = self.dense(hidden_states) layer_outputs = self.dropout(layer_outputs, training=training) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.true_hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFMobileBertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.use_bottleneck = config.use_bottleneck self.dense = keras.layers.Dense( config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = NORM2FN[config.normalization_type]( config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) if not self.use_bottleneck: self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) else: self.bottleneck = TFOutputBottleneck(config, name="bottleneck") self.config = config def call(self, hidden_states, residual_tensor_1, residual_tensor_2, training=False): hidden_states = self.dense(hidden_states) if not self.use_bottleneck: hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + residual_tensor_1) else: hidden_states = self.LayerNorm(hidden_states + residual_tensor_1) hidden_states = self.bottleneck(hidden_states, residual_tensor_2) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) if getattr(self, "bottleneck", None) is not None: with tf.name_scope(self.bottleneck.name): self.bottleneck.build(None) class TFBottleneckLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.intra_bottleneck_size, name="dense") self.LayerNorm = NORM2FN[config.normalization_type]( config.intra_bottleneck_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.config = config def call(self, inputs): hidden_states = self.dense(inputs) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFBottleneck(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.key_query_shared_bottleneck = config.key_query_shared_bottleneck self.use_bottleneck_attention = config.use_bottleneck_attention self.bottleneck_input = TFBottleneckLayer(config, 
name="input") if self.key_query_shared_bottleneck: self.attention = TFBottleneckLayer(config, name="attention") def call(self, hidden_states): # This method can return three different tuples of values. These different values make use of bottlenecks, # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory # usage. These linear layer have weights that are learned during training. # # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the # key, query, value, and "layer input" to be used by the attention layer. # This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor # in the attention self output, after the attention scores have been computed. # # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return # four values, three of which have been passed through a bottleneck: the query and key, passed through the same # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck. # # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck, # and the residual layer will be this value passed through a bottleneck. bottlenecked_hidden_states = self.bottleneck_input(hidden_states) if self.use_bottleneck_attention: return (bottlenecked_hidden_states,) * 4 elif self.key_query_shared_bottleneck: shared_attention_input = self.attention(hidden_states) return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states) else: return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "bottleneck_input", None) is not None: with tf.name_scope(self.bottleneck_input.name): self.bottleneck_input.build(None) if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) class TFFFNOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.true_hidden_size, name="dense") self.LayerNorm = NORM2FN[config.normalization_type]( config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm" ) self.config = config def call(self, hidden_states, residual_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.LayerNorm(hidden_states + residual_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFFFNLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.intermediate = TFMobileBertIntermediate(config, name="intermediate") self.mobilebert_output = TFFFNOutput(config, name="output") def call(self, hidden_states): intermediate_output = self.intermediate(hidden_states) layer_outputs = self.mobilebert_output(intermediate_output, hidden_states) return layer_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 
"mobilebert_output", None) is not None: with tf.name_scope(self.mobilebert_output.name): self.mobilebert_output.build(None) class TFMobileBertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.use_bottleneck = config.use_bottleneck self.num_feedforward_networks = config.num_feedforward_networks self.attention = TFMobileBertAttention(config, name="attention") self.intermediate = TFMobileBertIntermediate(config, name="intermediate") self.mobilebert_output = TFMobileBertOutput(config, name="output") if self.use_bottleneck: self.bottleneck = TFBottleneck(config, name="bottleneck") if config.num_feedforward_networks > 1: self.ffn = [TFFFNLayer(config, name=f"ffn.{i}") for i in range(config.num_feedforward_networks - 1)] def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): if self.use_bottleneck: query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states) else: query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4 attention_outputs = self.attention( query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions, training=training, ) attention_output = attention_outputs[0] s = (attention_output,) if self.num_feedforward_networks != 1: for i, ffn_module in enumerate(self.ffn): attention_output = ffn_module(attention_output) s += (attention_output,) intermediate_output = self.intermediate(attention_output) layer_output = self.mobilebert_output(intermediate_output, attention_output, hidden_states, training=training) outputs = ( (layer_output,) + attention_outputs[1:] + ( tf.constant(0), query_tensor, key_tensor, value_tensor, layer_input, attention_output, intermediate_output, ) + s ) # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "mobilebert_output", None) is not None: with tf.name_scope(self.mobilebert_output.name): self.mobilebert_output.build(None) if getattr(self, "bottleneck", None) is not None: with tf.name_scope(self.bottleneck.name): self.bottleneck.build(None) if getattr(self, "ffn", None) is not None: for layer in self.ffn: with tf.name_scope(layer.name): layer.build(None) class TFMobileBertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = [TFMobileBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, training=training ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return 
tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFMobileBertPooler(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.do_activate = config.classifier_activation if self.do_activate: self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] if not self.do_activate: return first_token_tensor else: pooled_output = self.dense(first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFMobileBertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build(None) class TFMobileBertLMPredictionHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFMobileBertPredictionHeadTransform(config, name="transform") self.config = config def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") self.dense = self.add_weight( shape=(self.config.hidden_size - self.config.embedding_size, self.config.vocab_size), initializer="zeros", trainable=True, name="dense/weight", ) self.decoder = self.add_weight( shape=(self.config.vocab_size, self.config.embedding_size), initializer="zeros", trainable=True, name="decoder/weight", ) if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self): return self def set_output_embeddings(self, value): self.decoder = value self.config.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.transform(hidden_states) 
hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0)) hidden_states = hidden_states + self.bias return hidden_states class TFMobileBertMLMHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.predictions = TFMobileBertLMPredictionHead(config, name="predictions") def call(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @keras_serializable class TFMobileBertMainLayer(keras.layers.Layer): config_class = MobileBertConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) self.config = config self.num_hidden_layers = config.num_hidden_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFMobileBertEmbeddings(config, name="embeddings") self.encoder = TFMobileBertEncoder(config, name="encoder") self.pooler = TFMobileBertPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFMobileBertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileBertConfig base_model_prefix = "mobilebert" @dataclass class TFMobileBertForPreTrainingOutput(ModelOutput): """ Output type of [`TFMobileBertForPreTraining`]. Args: prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None prediction_logits: tf.Tensor = None seq_relationship_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None MOBILEBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILEBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertModel(TFMobileBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutputWithPooling]: outputs = self.mobilebert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) @add_start_docstrings( """ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next sentence prediction (classification)` head. 
""", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel, TFMobileBertPreTrainingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") self.predictions = TFMobileBertMLMHead(config, name="predictions___cls") self.seq_relationship = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls") def get_lm_head(self): return self.predictions.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, next_sentence_label: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFMobileBertForPreTrainingOutput]: r""" Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFMobileBertForPreTraining >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased") >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 >>> outputs = model(input_ids) >>> prediction_scores, seq_relationship_scores = outputs[:2] ```""" outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: d_labels = {"labels": labels} d_labels["next_sentence_label"] = next_sentence_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score)) if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return TFMobileBertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, "seq_relationship", None) is not None: 
with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build(None) def tf_to_pt_weight_rename(self, tf_weight): if tf_weight == "cls.predictions.decoder.weight": return tf_weight, "mobilebert.embeddings.word_embeddings.weight" else: return (tf_weight,) @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING) class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"seq_relationship___cls", r"cls.seq_relationship", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert") self.predictions = TFMobileBertMLMHead(config, name="predictions___cls") def get_lm_head(self): return self.predictions.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.57, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFMaskedLMOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels """ outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.predictions(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) def tf_to_pt_weight_rename(self, tf_weight): if tf_weight == "cls.predictions.decoder.weight": return tf_weight, "mobilebert.embeddings.word_embeddings.weight" else: return (tf_weight,) class TFMobileBertOnlyNSPHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.seq_relationship = keras.layers.Dense(2, name="seq_relationship") self.config = config def call(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "seq_relationship", None) is not None: with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build([None, None, self.config.hidden_size]) @add_start_docstrings( """MobileBert Model with a `next sentence prediction (classification)` head on top.""", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextSentencePredictionLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions___cls", r"cls.predictions"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") self.cls = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls") @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, next_sentence_label: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFNextSentencePredictorOutput]: r""" Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = TFMobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf") >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0] ```""" outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = ( None if next_sentence_label is None else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores) ) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return TFNextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "cls", None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None) @add_start_docstrings( """ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSequenceClassifierOutput]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_QA, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFQuestionAnsweringModelOutput]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions, "end_position": end_positions} loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFMultipleChoiceModelOutput]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.mobilebert( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, MOBILEBERT_START_DOCSTRING, ) class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"predictions___cls", r"seq_relationship___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFTokenClassifierOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilebert", None) is not None: with tf.name_scope(self.mobilebert.name): self.mobilebert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
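# --- Editor's note: illustrative usage sketch, not part of the upstream module. ---
# A minimal end-to-end check of the TF MobileBERT masked-LM head defined above, reusing the
# `google/mobilebert-uncased` checkpoint that the docstring examples already reference.
# Guarded so it only runs when executed directly (e.g. via
# `python -m transformers.models.mobilebert.modeling_tf_mobilebert`).
if __name__ == "__main__":
    import tensorflow as tf

    from transformers import AutoTokenizer, TFMobileBertForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
    model = TFMobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

    inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
    logits = model(**inputs).logits

    # Take the highest-scoring vocabulary id at the [MASK] position and decode it.
    mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
    predicted_id = int(tf.argmax(logits[0, mask_index]))
    print(tokenizer.decode([predicted_id]))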
transformers/src/transformers/models/mobilebert/modeling_tf_mobilebert.py/0
{ "file_path": "transformers/src/transformers/models/mobilebert/modeling_tf_mobilebert.py", "repo_id": "transformers", "token_count": 35828 }
348
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MobileViT model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "apple/mobilevit-small": "https://huggingface.co/apple/mobilevit-small/resolve/main/config.json", "apple/mobilevit-x-small": "https://huggingface.co/apple/mobilevit-x-small/resolve/main/config.json", "apple/mobilevit-xx-small": "https://huggingface.co/apple/mobilevit-xx-small/resolve/main/config.json", "apple/deeplabv3-mobilevit-small": ( "https://huggingface.co/apple/deeplabv3-mobilevit-small/resolve/main/config.json" ), "apple/deeplabv3-mobilevit-x-small": ( "https://huggingface.co/apple/deeplabv3-mobilevit-x-small/resolve/main/config.json" ), "apple/deeplabv3-mobilevit-xx-small": ( "https://huggingface.co/apple/deeplabv3-mobilevit-xx-small/resolve/main/config.json" ), # See all MobileViT models at https://huggingface.co/models?filter=mobilevit } class MobileViTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a MobileViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileViT [apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 256): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 2): The size (resolution) of each patch. hidden_sizes (`List[int]`, *optional*, defaults to `[144, 192, 240]`): Dimensionality (hidden size) of the Transformer encoders at each stage. neck_hidden_sizes (`List[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`): The number of channels for the feature maps of the backbone. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. mlp_ratio (`float`, *optional*, defaults to 2.0): The ratio of the number of channels in the output of the MLP to the number of channels in the input. expand_ratio (`float`, *optional*, defaults to 4.0): Expansion factor for the MobileNetv2 layers. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the Transformer encoder and convolution layers. 
conv_kernel_size (`int`, *optional*, defaults to 3): The size of the convolutional kernel in the MobileViT layer. output_stride (`int`, *optional*, defaults to 32): The ratio of the spatial resolution of the output to the resolution of the input image. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the Transformer encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. classifier_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for attached classifiers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. aspp_out_channels (`int`, *optional*, defaults to 256): Number of output channels used in the ASPP layer for semantic segmentation. atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`): Dilation (atrous) factors used in the ASPP layer for semantic segmentation. aspp_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the ASPP layer for semantic segmentation. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. Example: ```python >>> from transformers import MobileViTConfig, MobileViTModel >>> # Initializing a mobilevit-small style configuration >>> configuration = MobileViTConfig() >>> # Initializing a model from the mobilevit-small style configuration >>> model = MobileViTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mobilevit" def __init__( self, num_channels=3, image_size=256, patch_size=2, hidden_sizes=[144, 192, 240], neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640], num_attention_heads=4, mlp_ratio=2.0, expand_ratio=4.0, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-5, qkv_bias=True, aspp_out_channels=256, atrous_rates=[6, 12, 18], aspp_dropout_prob=0.1, semantic_loss_ignore_index=255, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_sizes = hidden_sizes self.neck_hidden_sizes = neck_hidden_sizes self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.expand_ratio = expand_ratio self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias # decode head attributes for semantic segmentation self.aspp_out_channels = aspp_out_channels self.atrous_rates = atrous_rates self.aspp_dropout_prob = aspp_dropout_prob self.semantic_loss_ignore_index = semantic_loss_ignore_index class MobileViTOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return 
OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})]) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})]) @property def atol_for_validation(self) -> float: return 1e-4
transformers/src/transformers/models/mobilevit/configuration_mobilevit.py/0
{ "file_path": "transformers/src/transformers/models/mobilevit/configuration_mobilevit.py", "repo_id": "transformers", "token_count": 3268 }
349
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Text/audio processor class for MusicGen """ from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class MusicgenProcessor(ProcessorMixin): r""" Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor class. [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information. Args: feature_extractor (`EncodecFeatureExtractor`): An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input. tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "EncodecFeatureExtractor" tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, *args, **kwargs): """ Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text` argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if text is not None: inputs = self.tokenizer(text, **kwargs) if audio is not None: audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if audio is None: return inputs elif text is None: return audio_inputs else: inputs["input_values"] = audio_inputs["input_values"] if "padding_mask" in audio_inputs: inputs["padding_mask"] = audio_inputs["padding_mask"] return inputs def batch_decode(self, *args, **kwargs): """ This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. 
""" audio_values = kwargs.pop("audio", None) padding_mask = kwargs.pop("padding_mask", None) if len(args) > 0: audio_values = args[0] args = args[1:] if audio_values is not None: return self._decode_audio(audio_values, padding_mask=padding_mask) else: return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]: """ This method strips any padding from the audio values to return a list of numpy audio arrays. """ audio_values = to_numpy(audio_values) bsz, channels, seq_len = audio_values.shape if padding_mask is None: return list(audio_values) padding_mask = to_numpy(padding_mask) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) difference = seq_len - padding_mask.shape[-1] padding_value = 1 - self.feature_extractor.padding_value padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value) audio_values = audio_values.tolist() for i in range(bsz): sliced_audio = np.asarray(audio_values[i])[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] audio_values[i] = sliced_audio.reshape(channels, -1) return audio_values
transformers/src/transformers/models/musicgen/processing_musicgen.py/0
{ "file_path": "transformers/src/transformers/models/musicgen/processing_musicgen.py", "repo_id": "transformers", "token_count": 2176 }
350
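A brief, hedged usage sketch for the processor above; the checkpoint name and the 32 kHz sampling rate are assumptions and not part of the source file.

```python
# Hypothetical usage sketch for MusicgenProcessor (checkpoint name and sampling rate are assumptions).
import numpy as np
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")

# One second of silent mono audio; real inputs must match the feature extractor's sampling rate.
audio = np.zeros(32000, dtype=np.float32)

inputs = processor(
    text=["80s pop track with bassy drums and synth"],
    audio=audio,
    sampling_rate=32000,
    padding=True,
    return_tensors="pt",
)
print(list(inputs.keys()))  # e.g. input_ids, attention_mask, input_values, padding_mask
```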
# coding=utf-8 # Copyright 2023, HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ NLLB-MoE model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json", } class NllbMoeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the NLLB-MoE [facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 128112): Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NllbMoeModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.05): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.05): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. second_expert_policy (`str`, *optional*, defaults to `"all"`): The policy used for sampling the probability of routing each token to a second expert. normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `False`): Whether or not to normalize the router probabilities before applying a mask based on the experts' capacity (capacity dropping). batch_prioritized_routing (`bool`, *optional*, defaults to `False`): Whether or not to order the tokens by their router probabilities before capacity dropping. This means that the tokens that have the highest probabilities will be routed before other tokens that might be further in the sequence. moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0): Fraction of tokens used as capacity during validation; if set to a negative value, the same capacity as during training is used. Should be in the range (0.0, 1.0]. num_experts (`int`, *optional*, defaults to 128): Number of experts for each NllbMoeSparseMlp layer. expert_capacity (`int`, *optional*, defaults to 64): Number of tokens that can be stored in each expert. encoder_sparse_step (`int`, *optional*, defaults to 4): Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse. decoder_sparse_step (`int`, *optional*, defaults to 4): Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse. router_dtype (`str`, *optional*, defaults to `"float32"`): The `dtype` used for the routers. It is preferable to keep the `dtype` as `"float32"`, as specified in the *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. If `False`, the padding tokens are not routed to any experts. router_bias (`bool`, *optional*, defaults to `False`): Whether or not the classifier of the router should have a bias. moe_token_dropout (`float`, *optional*, defaults to 0.2): Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert outputs. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models).
Example: ```python >>> from transformers import NllbMoeModel, NllbMoeConfig >>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration >>> configuration = NllbMoeConfig() >>> # Initializing a model from the facebook/nllb-moe-54b style configuration >>> model = NllbMoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "nllb-moe" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.router_z_loss_coef = router_z_loss_coef self.router_aux_loss_coef = router_aux_loss_coef self.decoder_sparse_step = decoder_sparse_step self.encoder_sparse_step = encoder_sparse_step self.num_experts = num_experts self.expert_capacity = expert_capacity self.router_bias = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}") self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.batch_prioritized_routing = batch_prioritized_routing self.second_expert_policy = second_expert_policy self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction self.moe_token_dropout = moe_token_dropout self.output_router_logits = output_router_logits super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
transformers/src/transformers/models/nllb_moe/configuration_nllb_moe.py/0
{ "file_path": "transformers/src/transformers/models/nllb_moe/configuration_nllb_moe.py", "repo_id": "transformers", "token_count": 4486 }
351
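A small, hedged sketch showing how the configuration above can be instantiated with non-default MoE settings; the chosen values are illustrative only and not from the source.

```python
# Illustrative sketch: a reduced NLLB-MoE configuration for local experimentation.
from transformers import NllbMoeConfig

config = NllbMoeConfig(
    d_model=512,
    encoder_layers=4,
    decoder_layers=4,
    encoder_ffn_dim=1024,
    decoder_ffn_dim=1024,
    num_experts=8,
    encoder_sparse_step=2,   # every second encoder layer becomes a sparse MoE layer
    decoder_sparse_step=2,
    router_dtype="float32",  # keep routing in float32, as recommended in the docstring above
)
print(config.num_experts, config.expert_capacity, config.router_dtype)
```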
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch OneFormer model.""" import copy import math import warnings from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import numpy as np import torch from torch import Tensor, nn from torch.cuda.amp import autocast from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, logging, replace_return_docstrings, requires_backends, ) from ...utils.backbone_utils import load_backbone from .configuration_oneformer import OneFormerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "OneFormerConfig" _CHECKPOINT_FOR_DOC = "shi-labs/oneformer_ade20k_swin_tiny" ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "shi-labs/oneformer_ade20k_swin_tiny", # See all OneFormer models at https://huggingface.co/models?filter=oneformer ] if is_scipy_available(): from scipy.optimize import linear_sum_assignment def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention def multi_scale_deformable_attention( value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor ) -> Tensor: batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height.item() * width.item() for height, width in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( 
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() # Copied from transformers.models.maskformer.modeling_maskformer.dice_loss def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor: r""" Compute the DICE loss, similar to generalized IOU for masks as follows: $$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 |x \cap y|}{|x \cup y| + 1} $$ In practice, since `labels` is a binary mask (only 0s and 1s), the dice loss can be computed as follows: $$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 \, x \cdot y}{x + y + 1} $$ Args: inputs (`torch.Tensor`): A tensor representing a mask. labels (`torch.Tensor`): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class). num_masks (`int`): The number of masks present in the current batch, used for normalization. Returns: `torch.Tensor`: The computed loss. """ probs = inputs.sigmoid().flatten(1) numerator = 2 * (probs * labels).sum(-1) denominator = probs.sum(-1) + labels.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) loss = loss.sum() / num_masks return loss # Copied from transformers.models.mask2former.modeling_mask2former.sigmoid_cross_entropy_loss def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor: r""" Args: inputs (`torch.Tensor`): A float tensor of arbitrary shape. labels (`torch.Tensor`): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class). Returns: loss (`torch.Tensor`): The computed loss. """ criterion = nn.BCEWithLogitsLoss(reduction="none") cross_entropy_loss = criterion(inputs, labels) loss = cross_entropy_loss.mean(1).sum() / num_masks return loss # Copied from transformers.models.maskformer.modeling_maskformer.pair_wise_dice_loss def pair_wise_dice_loss(inputs: Tensor, labels: Tensor) -> Tensor: """ A pair-wise version of the dice loss, see `dice_loss` for usage. Args: inputs (`torch.Tensor`): A tensor representing a mask labels (`torch.Tensor`): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class). Returns: `torch.Tensor`: The computed loss between each pair. """ inputs = inputs.sigmoid().flatten(1) numerator = 2 * torch.matmul(inputs, labels.T) # using broadcasting to get a [num_queries, NUM_CLASSES] matrix denominator = inputs.sum(-1)[:, None] + labels.sum(-1)[None, :] loss = 1 - (numerator + 1) / (denominator + 1) return loss # Copied from transformers.models.mask2former.modeling_mask2former.pair_wise_sigmoid_cross_entropy_loss def pair_wise_sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: r""" A pair-wise version of the cross entropy loss, see `sigmoid_cross_entropy_loss` for usage. Args: inputs (`torch.Tensor`): A tensor representing a mask. labels (`torch.Tensor`): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class). Returns: loss (`torch.Tensor`): The computed loss between each pair.
""" height_and_width = inputs.shape[1] criterion = nn.BCEWithLogitsLoss(reduction="none") cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs)) cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs)) loss_pos = torch.matmul(cross_entropy_loss_pos, labels.T) loss_neg = torch.matmul(cross_entropy_loss_neg, (1 - labels).T) loss = loss_pos + loss_neg loss = loss / height_and_width return loss # Copied from transformers.models.mask2former.modeling_mask2former.sample_point def sample_point( input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs ) -> torch.Tensor: """ A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors. Args: input_features (`torch.Tensor` of shape (batch_size, channels, height, width)): A tensor that contains features map on a height * width grid point_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width,: 2)): A tensor that contains [0, 1] * [0, 1] normalized point coordinates add_dim (`bool`): boolean value to keep track of added dimension Returns: point_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels, height_grid, width_grid): A tensor that contains features for points in `point_coordinates`. """ if point_coordinates.dim() == 3: add_dim = True point_coordinates = point_coordinates.unsqueeze(2) # use nn.function.grid_sample to get features for points in `point_coordinates` via bilinear interpolation point_features = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs) if add_dim: point_features = point_features.squeeze(3) return point_features # Refactored from https://github.com/SHI-Labs/OneFormer/blob/33ebb56ed34f970a30ae103e786c0cb64c653d9a/oneformer/modeling/matcher.py#L93 class OneFormerHungarianMatcher(nn.Module): def __init__( self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0, num_points: int = 12544 ): """This class computes an assignment between the labels and the predictions of the network. For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Params: cost_class (float, *optional*, defaults to 1.0): This is the relative weight of the classification error in the matching cost. cost_mask (float, *optional*, defaults to 1.0): This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost. cost_dice (float, *optional*, defaults to 1.0): This is the relative weight of the dice loss of the binary mask in the matching cost num_points (int, *optional*, defaults to 12544): Number of points to be sampled for dice and mask loss matching cost. """ super().__init__() if cost_class == 0 and cost_mask == 0 and cost_dice == 0: raise ValueError("All costs cant be 0") self.cost_class = cost_class self.cost_mask = cost_mask self.cost_dice = cost_dice self.num_points = num_points @torch.no_grad() def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]: """Performs the matching Params: masks_queries_logits (`torch.Tensor`): A tensor` of dim `batch_size, num_queries, num_labels` with the classification logits. class_queries_logits (`torch.Tensor`): A tensor` of dim `batch_size, num_queries, height, width` with the predicted masks. 
class_labels (`torch.Tensor`): A tensor` of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. mask_labels (`torch.Tensor`): A tensor` of dim `num_target_boxes, height, width` containing the target masks. Returns: `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected labels (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_targets). """ indices: List[Tuple[np.array]] = [] num_queries = class_queries_logits.shape[1] preds_masks = masks_queries_logits preds_probs = class_queries_logits # iterate through batch size for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels): pred_probs = pred_probs.softmax(-1) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. cost_class = -pred_probs[:, labels] pred_mask = pred_mask[:, None] target_mask = target_mask[:, None].to(pred_mask.device) # all masks share the same set of points for efficient matching! point_coords = torch.rand(1, self.num_points, 2, device=pred_mask.device) # get ground truth labels target_mask = sample_point( target_mask, point_coords.repeat(target_mask.shape[0], 1, 1), align_corners=False, ).squeeze(1) pred_mask = sample_point( pred_mask, point_coords.repeat(pred_mask.shape[0], 1, 1), align_corners=False, ).squeeze(1) with autocast(enabled=False): pred_mask = pred_mask.float() target_mask = target_mask.float() # compute the sigmoid ce loss cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask) # Compute the dice loss cost_dice = pair_wise_dice_loss(pred_mask, target_mask) # final cost matrix cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice cost_matrix = cost_matrix.reshape(num_queries, -1).cpu() # do the assigmented using the hungarian algorithm in scipy assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) indices.append(assigned_indices) # It could be stacked in one tensor matched_indices = [ (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices ] return matched_indices class OneFormerLoss(nn.Module): def __init__( self, num_classes: int, matcher: OneFormerHungarianMatcher, weight_dict: Dict[str, float], eos_coef: float, num_points: int, oversample_ratio: float, importance_sample_ratio: float, contrastive_temperature: float = None, ): """ This class computes the losses using the class predictions, mask predictions and the contrastive queries. Oneformer calculates the classification CE loss on the class predictions. Mask predictions are used for calculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive loss. Args: num_labels (`int`): The number of classes. matcher (`OneFormerHungarianMatcher`): A torch module that computes the assigments between the predictions and labels. weight_dict (`Dict[str, float]`): A dictionary of weights to be applied to the different losses. eos_coef (`float`): Weight to apply to the null class. num_points (`int`): Number of points to be sampled for dice and mask loss calculations. 
oversample_ratio (`float`): Required for pointwise loss calculation. importance_sample_ratio (`float`): Required for pointwise loss calculation. contrastive_temperature (`float`): Temperature for scaling the contrastive logits. """ requires_backends(self, ["scipy"]) super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.eos_coef = eos_coef empty_weight = torch.ones(self.num_classes + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) # pointwise mask loss parameters self.num_points = num_points self.oversample_ratio = oversample_ratio self.importance_sample_ratio = importance_sample_ratio self.contrastive_temperature = contrastive_temperature if self.contrastive_temperature is not None: self.logit_scale = nn.Parameter(torch.tensor(np.log(1 / contrastive_temperature))) def _max_by_axis(self, the_list: List[List[int]]) -> List[int]: maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def _pad_images_to_max_in_batch(self, tensors: List[Tensor]) -> Tuple[Tensor, Tensor]: # get the maximum size in the batch max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors]) batch_size = len(tensors) # compute finel size batch_shape = [batch_size] + max_size b, _, h, w = batch_shape # get metadata dtype = tensors[0].dtype device = tensors[0].device padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device) padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device) # pad the tensors to the size of the biggest one for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks): padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor) padding_mask[: tensor.shape[1], : tensor.shape[2]] = False return padded_tensors, padding_masks def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor): """Compute the query-text contrastive loss. Args: contrastive_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, hidden_dim` text_queries (`torch.Tensor`): A tensor of shape `batch_size, num_queries, hidden_dim` Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: - **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries and text queries derived from input text list. """ image_queries = contrastive_queries_logits.float() # [batch_size, hidden_dim] image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1) text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1) logit_scale = torch.clamp(self.logit_scale.exp(), max=100) logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale logits_per_img = logits_per_text.t() loss_img = nn.functional.cross_entropy( logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device) ) loss_text = nn.functional.cross_entropy( logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device) ) loss_contrastive = loss_img + loss_text losses = {"loss_contrastive": loss_contrastive} return losses def loss_labels( self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array] ) -> Dict[str, Tensor]: """Compute the losses related to the labels using cross entropy. 
Args: class_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, num_labels` class_labels (`List[torch.Tensor]`): List of class labels of shape `(labels)`. indices (`Tuple[np.array])`: The indices computed by the Hungarian matcher. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. """ pred_logits = class_queries_logits batch_size, num_queries, _ = pred_logits.shape criterion = nn.CrossEntropyLoss(weight=self.empty_weight) idx = self._get_predictions_permutation_indices(indices) # shape = (batch_size, num_queries) target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)]) # shape = (batch_size, num_queries) target_classes = torch.full( (batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device ) target_classes[idx] = target_classes_o # permute pred_logits (batch_size, num_queries, num_labels) -> (batch_size, num_labels, num_queries) pred_logits_transposed = pred_logits.transpose(1, 2) loss_ce = criterion(pred_logits_transposed, target_classes) losses = {"loss_cross_entropy": loss_ce} return losses def loss_masks( self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int ) -> Dict[str, Tensor]: """Compute the losses related to the masks using focal and dice loss. Args: masks_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, height, width` mask_labels (`torch.Tensor`): List of mask labels of shape `(labels, height, width)`. indices (`Tuple[np.array])`: The indices computed by the Hungarian matcher. num_masks (`int)`: The number of masks, used for normalization. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys: - **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks. - **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth masks. 
""" src_idx = self._get_predictions_permutation_indices(indices) tgt_idx = self._get_targets_permutation_indices(indices) # shape (batch_size * num_queries, height, width) pred_masks = masks_queries_logits[src_idx] # shape (batch_size, num_queries, height, width) # pad all and stack the targets to the num_labels dimension # upsample predictions to the target size, we have to add one dim to use interpolate target_masks, _ = self._pad_images_to_max_in_batch(mask_labels) target_masks = target_masks[tgt_idx] pred_masks = pred_masks[:, None] target_masks = target_masks[:, None] with torch.no_grad(): # sample point_coords point_coords = self.sample_points_using_uncertainty( pred_masks, self.calculate_uncertainty, self.num_points, self.oversample_ratio, self.importance_sample_ratio, ) # get ground-truth labels point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1) point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1) losses = { "loss_mask": sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), "loss_dice": dice_loss(point_logits, point_labels, num_masks), } del pred_masks del target_masks return losses # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.calculate_uncertainty def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor: """ In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (`torch.Tensor`): A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is: the number of foreground classes. The values are logits. Returns: scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score. """ uncertainty_scores = -(torch.abs(logits)) return uncertainty_scores # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.sample_points_using_uncertainty def sample_points_using_uncertainty( self, logits: torch.Tensor, uncertainty_function, num_points: int, oversample_ratio: int, importance_sample_ratio: float, ) -> torch.Tensor: """ This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit prediction as input. Args: logits (`float`): Logit predictions for P points. uncertainty_function: A function that takes logit predictions for P points and returns their uncertainties. num_points (`int`): The number of points P to sample. oversample_ratio (`int`): Oversampling parameter. importance_sample_ratio (`float`): Ratio of points that are sampled via importance sampling. Returns: point_coordinates (`torch.Tensor`): Coordinates for P sampled points. 
""" num_boxes = logits.shape[0] num_points_sampled = int(num_points * oversample_ratio) # Get random point coordinates point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device) # Get sampled prediction value for the point coordinates point_logits = sample_point(logits, point_coordinates, align_corners=False) # Calculate the uncertainties based on the sampled prediction values of the points point_uncertainties = uncertainty_function(point_logits) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device) idx += shift[:, None] point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2) if num_random_points > 0: point_coordinates = torch.cat( [point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)], dim=1, ) return point_coordinates def _get_predictions_permutation_indices(self, indices): # permute predictions following indices batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) predictions_indices = torch.cat([src for (src, _) in indices]) return batch_indices, predictions_indices def _get_targets_permutation_indices(self, indices): # permute labels following indices batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) target_indices = torch.cat([tgt for (_, tgt) in indices]) return batch_indices, target_indices def forward( self, masks_queries_logits: Tensor, class_queries_logits: Tensor, contrastive_queries_logits: Tensor, mask_labels: List[Tensor], class_labels: List[Tensor], text_queries: Tensor, auxiliary_predictions: Optional[Dict[str, Tensor]] = None, calculate_contrastive_loss: bool = True, ) -> Dict[str, Tensor]: """ This performs the loss computation. Args: masks_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, height, width` class_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, num_labels` contrastive_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, hidden_dim` mask_labels (`torch.Tensor`): List of mask labels of shape `(labels, height, width)`. class_labels (`List[torch.Tensor]`): List of class labels of shape `(labels)`. text_queries (`torch.Tensor`): A tensor of shape `batch_size, num_queries, hidden_dim` auxiliary_predictions (`Dict[str, torch.Tensor]`, *optional*): if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], then it contains the logits from the inner layers of the Detr's Decoder. calculate_contrastive_loss (`bool`, *optional*, defaults to `True`): Whether or not to calculate the contrastive loss. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys: - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. - **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks. - **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth masks. - **loss_contrastive** -- The query-text contrstive loss computed using object and text queries. if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], the dictionary contains addional losses for each auxiliary predictions. 
""" # retrieve the matching between the outputs of the last layer and the labels indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels) # compute the average number of target masks for normalization purposes num_masks = self.get_num_masks(class_labels, device=class_labels[0].device) # get all the losses losses: Dict[str, Tensor] = { **self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), **self.loss_labels(class_queries_logits, class_labels, indices), } if calculate_contrastive_loss: losses = {**losses, **self.loss_contrastive(contrastive_queries_logits, text_queries)} # in case of auxiliary losses, we repeat this process with the output of each intermediate layer. if auxiliary_predictions is not None: for idx, aux_outputs in enumerate(auxiliary_predictions): masks_queries_logits = aux_outputs["masks_queries_logits"] class_queries_logits = aux_outputs["class_queries_logits"] loss_dict = self.forward( masks_queries_logits, class_queries_logits, None, mask_labels, class_labels, None, calculate_contrastive_loss=False, ) loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()} losses.update(loss_dict) return losses def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor: """ Computes the average number of target masks across the batch, for normalization purposes. """ num_masks = sum([len(classes) for classes in class_labels]) num_masks_pt = torch.as_tensor([num_masks], dtype=torch.float, device=device) return num_masks_pt @dataclass class OneFormerTransformerDecoderOutput(BaseModelOutput): """ Base class for outputs of the Transformer decoder. This class adds attributes for class predictions, mask predictions and contrastive logits to BaseModelOutputWithCrossAttentions. Args: object_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`): Queries representation for the region proposals. contrastive_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`): Queries representation for the contrastive loss. prediction_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`): Mask predictions from last layer of the transformer decoder. prediction_class (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): Class predictions from last layer of the transformer decoder. auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*): Tuple of class and mask predictions from each layer of the transformer decoder. """ object_queries: torch.FloatTensor = None contrastive_logits: Optional[torch.FloatTensor] = None prediction_masks: torch.FloatTensor = None prediction_class: torch.FloatTensor = None auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None @dataclass # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderOutput with Mask2->One class OneFormerPixelDecoderOutput(ModelOutput): """ OneFormer's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns the mask features and the multiscale features. Args: multi_scale_features (`tuple(torch.FloatTensor)`): Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height, width)`from the Multi-Scale Deformable Attenntion based Pixel Decoder. mask_features (`torch.FloatTensor`): Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder Layer. 
attentions (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed or when `config.output_attentions=True` """ multi_scale_features: Tuple[torch.FloatTensor] = None mask_features: torch.FloatTensor = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class OneFormerPixelLevelModuleOutput(ModelOutput): """ OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the `encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale Deformable Attention based decoder. Args: encoder_features (List of `(torch.FloatTensor)`): List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. decoder_features (List of `(torch.FloatTensor)`): List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)): 1/4 scale features from the last Pixel Decoder Layer. """ encoder_features: List[torch.FloatTensor] = None decoder_features: List[torch.FloatTensor] = None decoder_last_feature: torch.FloatTensor = None @dataclass class OneFormerModelOutput(ModelOutput): """ Class for outputs of [`OneFormerModel`]. This class returns all the needed hidden states to compute the logits. Args: encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder model at the output of each stage. pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage. transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the transformer decoder at the output of each stage. transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) Output object queries from the last layer in the transformer decoder. transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) Contrastive queries from the transformer decoder. transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`) Mask Predictions from the last layer in the transformer decoder. 
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): Class Predictions from the last layer in the transformer decoder. transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*): Tuple of class and mask predictions from each layer of the transformer decoder. text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`) Text queries derived from the input text list used for calculating contrastive loss during training. task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`) 1D task token to condition the queries. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Self and Cross Attentions weights from transformer decoder. """ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None transformer_decoder_object_queries: torch.FloatTensor = None transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None transformer_decoder_mask_predictions: torch.FloatTensor = None transformer_decoder_class_predictions: torch.FloatTensor = None transformer_decoder_auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None text_queries: Optional[torch.FloatTensor] = None task_token: torch.FloatTensor = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class OneFormerForUniversalSegmentationOutput(ModelOutput): """ Class for outputs of [`OneFormerForUniversalSegmentationOutput`]. This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or [`~OneFormerImageProcessor.post_process_instance_segmentation`] or [`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see [`~OneFormerImageProcessor] for details regarding usage. Args: loss (`torch.Tensor`, *optional*): The computed loss, returned when labels are present. class_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each query. Note the `+ 1` is needed because we incorporate the null class. masks_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each query. auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*): List of class and mask predictions from each layer of the transformer decoder. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder model at the output of each stage. pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. 
Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage. transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the transformer decoder at the output of each stage. transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) Output object queries from the last layer in the transformer decoder. transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) Contrastive queries from the transformer decoder. transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`) Mask Predictions from the last layer in the transformer decoder. transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): Class Predictions from the last layer in the transformer decoder. transformer_decoder_auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*): List of class and mask predictions from each layer of the transformer decoder. text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`) Text queries derived from the input text list used for calculating contrastive loss during training. task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`) 1D task token to condition the queries. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Self and Cross Attentions weights from transformer decoder. """ loss: Optional[torch.FloatTensor] = None class_queries_logits: torch.FloatTensor = None masks_queries_logits: torch.FloatTensor = None auxiliary_predictions: List[Dict[str, torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None pixel_decoder_hidden_states: Optional[List[torch.FloatTensor]] = None transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None transformer_decoder_object_queries: torch.FloatTensor = None transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None transformer_decoder_mask_predictions: torch.FloatTensor = None transformer_decoder_class_predictions: torch.FloatTensor = None transformer_decoder_auxiliary_predictions: Optional[List[Dict[str, torch.FloatTensor]]] = None text_queries: Optional[torch.FloatTensor] = None task_token: torch.FloatTensor = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None # Modified from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrFrozenBatchNorm2d with DeformableDetr->OneFormerPixelDecoder class OneFormerPixelDecoderFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. 
""" def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Modified from transformers.models.detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->OneFormerPixelDecoderEncoder class OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. """ def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): super().__init__() if embed_dim % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" ) dim_per_head = embed_dim // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." 
) self.im2col_step = 128 self.d_model = embed_dim self.n_levels = n_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) self.value_proj = nn.Linear(embed_dim, embed_dim) self.output_proj = nn.Linear(embed_dim, embed_dim) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = nn.functional.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 if reference_points.shape[-1] == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif reference_points.shape[-1] == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") # PyTorch implementation output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return output, attention_weights class OneFormerPixelDecoderEncoderLayer(nn.Module): def __init__(self, config: OneFormerConfig): super().__init__() self.embed_dim = config.conv_dim self.self_attn = OneFormerPixelDecoderEncoderMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, n_levels=3, n_points=4, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.dropout = config.dropout self.activation_fn = nn.functional.relu self.activation_dropout = config.dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim) self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim, 
eps=config.layer_norm_eps) self.is_training = config.is_training def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.is_training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Modified from from transformers.models.detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetrEncoder->OneFormerPixelDecoderEncoderOnly class OneFormerPixelDecoderEncoderOnly(nn.Module): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`OneFormerPixelDecoderEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. Args: config: OneFormerConfig """ def __init__(self, config: OneFormerConfig): super().__init__() self.config = config self.dropout = config.dropout self.layers = nn.ModuleList([OneFormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)]) @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. 
device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for lvl, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = torch.meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), ) ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
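            For example, with three feature maps of spatial shapes `(64, 64)`, `(32, 32)` and `(16, 16)` (shapes
            chosen only for illustration), the flattened `sequence_length` is `64*64 + 32*32 + 16*16 = 5376` and
            `level_start_index` is `[0, 4096, 5120]`.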
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Modified from from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoder with Mask2->One class OneFormerPixelDecoder(nn.Module): def __init__(self, config: OneFormerConfig, feature_channels): super().__init__() self.config = config # positional encoding self.position_embedding = OneFormerSinePositionEmbedding(num_pos_feats=config.conv_dim // 2, normalize=True) self.num_feature_levels = 3 transformer_in_channels = feature_channels[-self.num_feature_levels :] self.transformer_feature_strides = config.strides[-self.num_feature_levels :] self.feature_channels = feature_channels self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, config.conv_dim)) # Create input projection layers if self.num_feature_levels > 1: input_projections_list = [] for in_channels in transformer_in_channels[::-1]: input_projections_list.append( nn.Sequential( nn.Conv2d(in_channels, config.conv_dim, kernel_size=1), nn.GroupNorm(32, config.conv_dim), ) ) self.input_projections = nn.ModuleList(input_projections_list) else: self.input_projections = nn.ModuleList( [ nn.Sequential( nn.Conv2d(transformer_in_channels[-1], config.conv_dim, kernel_size=1), nn.GroupNorm(32, config.conv_dim), ) ] ) self.encoder = OneFormerPixelDecoderEncoderOnly(config) self.mask_projection = nn.Conv2d( config.conv_dim, config.mask_dim, kernel_size=1, stride=1, padding=0, ) self.common_stride = config.common_stride # extra fpn levels stride = min(self.transformer_feature_strides) self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride)) lateral_convs = [] output_convs = [] for idx, in_channels in enumerate(self.feature_channels[: self.num_fpn_levels]): lateral_conv = nn.Sequential( nn.Conv2d( in_channels, config.conv_dim, kernel_size=1, bias=False, ), nn.GroupNorm(32, config.conv_dim), ) output_conv = nn.Sequential( nn.Conv2d( config.conv_dim, config.conv_dim, kernel_size=3, stride=1, padding=1, bias=False, ), nn.GroupNorm(32, config.conv_dim), nn.ReLU(), ) self.add_module("adapter_{}".format(idx + 1), lateral_conv) self.add_module("layer_{}".format(idx + 1), output_conv) lateral_convs.append(lateral_conv) output_convs.append(output_conv) # Place convs into top-down order (from low to high resolution) # to make the top-down computation in forward clearer. 
self.lateral_convs = lateral_convs[::-1] self.output_convs = output_convs[::-1] def get_valid_ratio(self, mask, dtype=torch.float32): """Get the valid ratio of all feature maps.""" _, height, width = mask.shape valid_height = torch.sum(~mask[:, :, 0], 1) valid_width = torch.sum(~mask[:, 0, :], 1) valid_ratio_heigth = valid_height.to(dtype) / height valid_ratio_width = valid_width.to(dtype) / width valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) return valid_ratio def forward( self, features, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) sources = [] position_embeddings_list = [] for level, source in enumerate(features[::-1][: self.num_feature_levels]): sources.append(self.input_projections[level](source)) position_embeddings_list.append(self.position_embedding(source)) masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in sources] # Prepare encoder inputs (by flattening) source_flatten = [] mask_flatten = [] lvl_pos_embed_flatten = [] spatial_shapes = [] for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)): batch_size, num_channels, height, width = source.shape spatial_shape = (height, width) spatial_shapes.append(spatial_shape) source = source.flatten(2).transpose(1, 2) mask = mask.flatten(1) pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten.append(source) mask_flatten.append(mask) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1) # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder # Also provide spatial_shapes, level_start_index and valid_ratios if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) y = encoder_outputs.last_hidden_state bs = y.shape[0] split_size_or_sections = [None] * self.num_feature_levels for i in range(self.num_feature_levels): if i < self.num_feature_levels - 1: split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i] else: split_size_or_sections[i] = y.shape[1] - level_start_index[i] y = torch.split(y, split_size_or_sections, dim=1) out = [] multi_scale_features = [] num_cur_levels = 0 for i, z in enumerate(y): out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1])) # append `out` with extra FPN levels # Reverse feature maps into top-down order (from low to high 
resolution) for idx, feats in enumerate(features[: self.num_fpn_levels][::-1]): lateral_conv = self.lateral_convs[idx] output_conv = self.output_convs[idx] cur_fpn = lateral_conv(feats) # Following FPN implementation, we use nearest upsampling here y = cur_fpn + nn.functional.interpolate( out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False ) y = output_conv(y) out.append(y) for o in out: if num_cur_levels < self.num_feature_levels: multi_scale_features.append(o) num_cur_levels += 1 return OneFormerPixelDecoderOutput( mask_features=self.mask_projection(out[-1]), multi_scale_features=multi_scale_features, attentions=encoder_outputs.attentions, ) # Modified from from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelLevelModule with Mask2->One class OneFormerPixelLevelModule(nn.Module): def __init__(self, config: OneFormerConfig): """ Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527). It runs the input image through a backbone and a pixel decoder, generating multi-scale feature maps and pixel embeddings. Args: config ([`OneFormerConfig`]): The configuration used to instantiate this model. """ super().__init__() self.encoder = load_backbone(config) self.decoder = OneFormerPixelDecoder(config, feature_channels=self.encoder.channels) def forward(self, pixel_values: Tensor, output_hidden_states: bool = False) -> OneFormerPixelLevelModuleOutput: features: List[Tensor] = self.encoder(pixel_values).feature_maps decoder_output: OneFormerPixelDecoderOutput = self.decoder(features, output_hidden_states=output_hidden_states) return OneFormerPixelLevelModuleOutput( encoder_features=tuple(features), decoder_features=decoder_output.multi_scale_features, decoder_last_feature=decoder_output.mask_features, ) # Modified from transformers.models.detr.modeling_detr.DetrAttention with Detr->OneFormer class OneFormerAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, key_value_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None key_value_position_embeddings = ( key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None ) # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # add key-value position embeddings to the key value states if key_value_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention mask should be of size {(target_len, batch_size * self.num_heads, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights += attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, 
but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output).permute(1, 0, 2) return attn_output, attn_weights_reshaped class OneFormerTransformerDecoderSelfAttentionLayer(nn.Module): def __init__( self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05 ): super().__init__() self.self_attn = OneFormerAttention(embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, is_decoder=True) self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.activation = ACT2FN[activation] self.normalize_before = normalize_before def with_pos_embed(self, tensor, pos: Optional[Tensor]): return tensor if pos is None else tensor + pos def forward_post( self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): output2, attention_weights = self.self_attn( hidden_states=output, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True ) output = output + self.dropout(output2) output = self.norm(output) return output, attention_weights def forward_pre( self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): output2 = self.norm(output) output2, attention_weights = self.self_attn( hidden_states=output2, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True ) output = output + self.dropout(output2) return output, attention_weights def forward( self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): if self.normalize_before: return self.forward_pre(output, output_mask, output_key_padding_mask, query_pos) return self.forward_post(output, output_mask, output_key_padding_mask, query_pos) class OneFormerTransformerDecoderCrossAttentionLayer(nn.Module): def __init__( self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05 ): super().__init__() self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout) self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.activation = ACT2FN[activation] self.normalize_before = normalize_before def with_pos_embed(self, tensor, pos: Optional[Tensor]): return tensor if pos is None else tensor + pos def forward_post( self, output, memory, memory_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: 
Optional[Tensor] = None, ): output2, attention_weights = self.multihead_attn( query=self.with_pos_embed(output, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask, ) output = output + self.dropout(output2) output = self.norm(output) return output, attention_weights def forward_pre( self, output, memory, memory_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): output2 = self.norm(output) output2, attention_weights = self.multihead_attn( query=self.with_pos_embed(output2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask, ) output = output + self.dropout(output2) return output, attention_weights def forward( self, output, memory, memory_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): if self.normalize_before: return self.forward_pre(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos) return self.forward_post(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos) class OneFormerTransformerDecoderFFNLayer(nn.Module): def __init__( self, d_model, dim_feedforward=2048, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05, ): super().__init__() # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm = nn.LayerNorm(d_model, eps=layer_norm_eps) self.activation = ACT2FN[activation] self.normalize_before = normalize_before def with_pos_embed(self, tensor, pos: Optional[Tensor]): return tensor if pos is None else tensor + pos def forward_post(self, output): output2 = self.linear2(self.dropout(self.activation(self.linear1(output)))) output = output + self.dropout(output2) output = self.norm(output) return output def forward_pre(self, output): output2 = self.norm(output) output2 = self.linear2(self.dropout(self.activation(self.linear1(output2)))) output = output + self.dropout(output2) return output def forward(self, output): if self.normalize_before: return self.forward_pre(output) return self.forward_post(output) class OneFormerMLPPredictionHead(nn.Module): def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3): """ A classic Multi Layer Perceptron (MLP). Args: input_dim (`int`): The input dimensions. hidden_dim (`int`): The hidden dimensions. output_dim (`int`): The output dimensions. num_layers (int, *optional*, defaults to 3): The number of layers. 
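            For example (dimensions chosen only for illustration), `input_dim=256`, `hidden_dim=256`,
            `output_dim=128` and `num_layers=3` stack `Linear(256, 256) -> ReLU -> Linear(256, 256) -> ReLU ->
            Linear(256, 128)`.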
""" super().__init__() in_dims = [input_dim] + [hidden_dim] * (num_layers - 1) out_dims = [hidden_dim] * (num_layers - 1) + [output_dim] layers = [] for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)): layers.append( PredictionBlock(in_dim, out_dim, activation=nn.ReLU() if i < num_layers - 1 else nn.Identity()) ) self.layers = nn.Sequential(*layers) def forward(self, input: Tensor) -> Tensor: return self.layers(input) # refactored from original implementation class OneFormerTransformerDecoderLayer(nn.Module): def __init__(self, config: OneFormerConfig): super().__init__() self.embed_dim = config.hidden_dim self.num_feature_levels = 3 self.cross_attn = OneFormerTransformerDecoderCrossAttentionLayer( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=0.0, normalize_before=config.pre_norm, layer_norm_eps=config.layer_norm_eps, ) self.self_attn = OneFormerTransformerDecoderSelfAttentionLayer( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=0.0, normalize_before=config.pre_norm, layer_norm_eps=config.layer_norm_eps, ) self.ffn = OneFormerTransformerDecoderFFNLayer( d_model=self.embed_dim, dim_feedforward=config.dim_feedforward, dropout=0.0, normalize_before=config.pre_norm, layer_norm_eps=config.layer_norm_eps, ) def forward( self, index: int, output: torch.Tensor, multi_stage_features: List[torch.Tensor], multi_stage_positional_embeddings: List[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, query_embeddings: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: index (`int`): index of the layer in the Transformer decoder. output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)` multi_stage_features (`List[torch.Tensor]`): the multi-scale features from the pixel decoder. multi_stage_positional_embeddings (`List[torch.Tensor]`): positional embeddings for the multi_stage_features attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer query_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" level_index = index % self.num_feature_levels attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False # Masked Cross Attention output, cross_attn_weights = self.cross_attn( output, multi_stage_features[level_index], memory_mask=attention_mask, memory_key_padding_mask=None, # here we do not apply masking on padded region pos=multi_stage_positional_embeddings[level_index], query_pos=query_embeddings, ) # Self Attention output, self_attn_weights = self.self_attn( output, output_mask=None, output_key_padding_mask=None, query_pos=query_embeddings, ) # Fully Connected output = self.ffn(output) outputs = (output,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class OneFormerTransformerDecoderQueryTransformerDecoder(nn.Module): def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): super().__init__() self.layers = _get_clones(decoder_layer, num_layers) self.num_layers = num_layers self.norm = norm self.return_intermediate = return_intermediate def forward( self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): intermediate = [] for layer in self.layers: output = layer( output, memory, output_mask=output_mask, memory_mask=memory_mask, output_key_padding_mask=output_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos, ) if self.return_intermediate: intermediate.append(self.norm(output)) if self.norm is not None: output = self.norm(output) if self.return_intermediate: intermediate.pop() intermediate.append(output) if self.return_intermediate: return torch.stack(intermediate) return output.unsqueeze(0) class OneFormerTransformerDecoderQueryTransformerDecoderLayer(nn.Module): def __init__( self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, layer_norm_eps=1e-05, ): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = ACT2FN[activation] self.normalize_before = normalize_before def with_pos_embed(self, tensor, pos: Optional[Tensor]): return tensor if pos is None else tensor + pos def forward_post( self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): q = k = self.with_pos_embed(output, query_pos) output2 = self.self_attn(q, k, value=output, attn_mask=output_mask, key_padding_mask=output_key_padding_mask) output2 = output2[0] output = output + self.dropout1(output2) output = self.norm1(output) output2 = self.multihead_attn( query=self.with_pos_embed(output, query_pos), key=self.with_pos_embed(memory, pos), value=memory, 
attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask, ) output2 = output2[0] output = output + self.dropout2(output2) output = self.norm2(output) output2 = self.linear2(self.dropout(self.activation(self.linear1(output)))) output = output + self.dropout3(output2) output = self.norm3(output) return output def forward_pre( self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): output2 = self.norm1(output) q = k = self.with_pos_embed(output2, query_pos) output2 = self.self_attn(q, k, value=output2, attn_mask=output_mask, key_padding_mask=output_key_padding_mask) output2 = output2[0] output = output + self.dropout1(output2) output2 = self.norm2(output) output2 = self.multihead_attn( query=self.with_pos_embed(output2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask, ) output2 = output2[0] output = output + self.dropout2(output2) output2 = self.norm3(output) output2 = self.linear2(self.dropout(self.activation(self.linear1(output2)))) output = output + self.dropout3(output2) return output def forward( self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, ): if self.normalize_before: return self.forward_pre( output, memory, output_mask, memory_mask, output_key_padding_mask, memory_key_padding_mask, pos, query_pos, ) return self.forward_post( output, memory, output_mask, memory_mask, output_key_padding_mask, memory_key_padding_mask, pos, query_pos, ) class OneFormerTransformerDecoderQueryTransformer(nn.Module): def __init__( self, d_model=512, nhead=8, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, return_intermediate_dec=False, layer_norm_eps=1e-05, ): super().__init__() decoder_layer = OneFormerTransformerDecoderQueryTransformerDecoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before, layer_norm_eps ) decoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps) self.decoder = OneFormerTransformerDecoderQueryTransformerDecoder( decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec, ) self.d_model = d_model self.nhead = nhead def forward(self, src, mask, query_embed, pos_embed, task_token=None): batch_size = src.shape[0] src = src.flatten(2).permute(2, 0, 1) pos_embed = pos_embed.flatten(2).permute(2, 0, 1) query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) if mask is not None: mask = mask.flatten(1) if task_token is None: queries = torch.zeros_like(query_embed) else: queries = task_token.repeat(query_embed.shape[0], 1, 1) queries = self.decoder(queries, src, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed) return queries.transpose(1, 2) class OneFormerTransformerDecoder(nn.Module): """ Transformer decoder """ def __init__(self, in_channels: int, config: OneFormerConfig): super().__init__() self.config = config self.dropout = config.dropout self.num_heads = config.num_attention_heads self.is_training = config.is_training self.use_task_norm = config.use_task_norm self.use_auxiliary_loss = config.use_auxiliary_loss self.query_transformer = 
OneFormerTransformerDecoderQueryTransformer( d_model=config.hidden_dim, dropout=config.dropout, nhead=config.num_attention_heads, dim_feedforward=config.dim_feedforward, num_decoder_layers=config.query_dec_layers, normalize_before=config.pre_norm, return_intermediate_dec=False, layer_norm_eps=config.layer_norm_eps, ) self.decoder_norm = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_eps) self.num_feature_levels = 3 self.layers = nn.ModuleList( [OneFormerTransformerDecoderLayer(config) for _ in range(config.decoder_layers - 1)] ) self.query_input_projection = nn.Conv2d(in_channels, config.hidden_dim, kernel_size=1) self.class_embed = nn.Linear(config.hidden_dim, config.num_labels + 1) self.mask_embed = OneFormerMLPPredictionHead( config.hidden_dim, config.hidden_dim, config.mask_dim, 3, ) def forward( self, task_token=None, multi_stage_features=None, multi_stage_positional_embeddings=None, mask_features=None, query_features=None, query_embeddings=None, query_embedder=None, size_list=None, output_attentions=None, ): if self.use_task_norm: task_token = self.decoder_norm(task_token) object_queries = self.query_transformer( query_features, None, query_embedder.weight[:-1], self.query_input_projection(mask_features), task_token if self.use_task_norm else None, ) object_queries = object_queries[0].permute(1, 0, 2) queries = torch.cat([object_queries, task_token], dim=0) output = queries.clone() intermediate_class_predictions = [] intermediate_mask_predictions = [] # prediction heads on learnable query features outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads( output, mask_features, attention_mask_target_size=size_list[0] ) intermediate_class_predictions.append(outputs_class) intermediate_mask_predictions.append(outputs_mask) attentions = () for index, layer in enumerate(self.layers): layer_outputs = layer( index=index, output=output, multi_stage_features=multi_stage_features, multi_stage_positional_embeddings=multi_stage_positional_embeddings, attention_mask=attention_mask, query_embeddings=query_embeddings, output_attentions=output_attentions, ) output = layer_outputs[0] attentions += (layer_outputs[1:],) outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads( output, mask_features, attention_mask_target_size=size_list[(index + 1) % self.num_feature_levels] ) intermediate_class_predictions.append(outputs_class) intermediate_mask_predictions.append(outputs_mask) if not len(intermediate_mask_predictions) == len(self.layers) + 1: raise ValueError( "Intermediate predictions in the transformer decoder must have the same number of elements as number" " of layers" ) object_queries = layer_outputs[0].permute(1, 0, 2) contrastive_logits = queries.permute(1, 0, 2) return OneFormerTransformerDecoderOutput( object_queries=object_queries, contrastive_logits=contrastive_logits, prediction_masks=intermediate_mask_predictions[-1], prediction_class=intermediate_class_predictions[-1], auxiliary_predictions=self._get_aux_predictions( intermediate_class_predictions, intermediate_mask_predictions ) if self.use_auxiliary_loss else None, attentions=attentions, ) def forward_prediction_heads(self, output, mask_features, attention_mask_target_size): decoder_output = self.decoder_norm(output) decoder_output = decoder_output.transpose(0, 1) outputs_class = self.class_embed(decoder_output) mask_embed = self.mask_embed(decoder_output) outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features) attention_mask = nn.functional.interpolate( outputs_mask, 
size=attention_mask_target_size, mode="bilinear", align_corners=False ) # must use bool type # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. attention_mask = ( attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5 ).bool() attention_mask = attention_mask.detach() return outputs_class, outputs_mask, attention_mask @torch.jit.unused def _get_aux_predictions(self, outputs_class, outputs_seg_masks): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. aux_list = [ {"class_queries_logits": a, "masks_queries_logits": b} for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1]) ] return tuple(aux_list) class OneFormerTransformerModule(nn.Module): """ The OneFormer's transformer module. """ def __init__(self, in_features: int, config: OneFormerConfig): super().__init__() hidden_dim = config.hidden_dim self.num_feature_levels = 3 self.position_embedder = OneFormerSinePositionEmbedding(num_pos_feats=hidden_dim // 2, normalize=True) self.queries_embedder = nn.Embedding(config.num_queries, hidden_dim) self.input_projections = [] for _ in range(self.num_feature_levels): if in_features != hidden_dim or config.enforce_input_proj: self.input_projections.append(nn.Conv2d(in_features, hidden_dim, kernel_size=1)) else: self.input_projections.append(nn.Sequential()) self.decoder = OneFormerTransformerDecoder(in_channels=in_features, config=config) self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim) def forward( self, multi_scale_features: List[Tensor], mask_features: Tensor, task_token: Tensor, output_attentions: bool = False, ) -> OneFormerTransformerDecoderOutput: if not len(multi_scale_features) == self.num_feature_levels: raise ValueError( f"Number of elements in multi_scale_features ({len(multi_scale_features)}) and num_feature_levels" f" ({self.num_feature_levels}) do not match!" 
) multi_stage_features = [] multi_stage_positional_embeddings = [] size_list = [] for i in range(self.num_feature_levels): size_list.append(multi_scale_features[i].shape[-2:]) multi_stage_positional_embeddings.append(self.position_embedder(multi_scale_features[i], None).flatten(2)) multi_stage_features.append( self.input_projections[i](multi_scale_features[i]).flatten(2) + self.level_embed.weight[i][None, :, None] ) # flatten NxCxHxW to HWxNxC multi_stage_positional_embeddings[-1] = multi_stage_positional_embeddings[-1].permute(2, 0, 1) multi_stage_features[-1] = multi_stage_features[-1].permute(2, 0, 1) _, batch_size, _ = multi_stage_features[0].shape # QxNxC query_embeddings = self.queries_embedder.weight.unsqueeze(1).repeat(1, batch_size, 1) task_token = task_token.unsqueeze(0) query_features = self.position_embedder(mask_features, None) return self.decoder( task_token=task_token, multi_stage_features=multi_stage_features, multi_stage_positional_embeddings=multi_stage_positional_embeddings, mask_features=mask_features, query_features=query_features, query_embeddings=query_embeddings, query_embedder=self.queries_embedder, size_list=size_list, output_attentions=output_attentions, ) # Copied from transformers.models.maskformer.modeling_maskformer.MaskFormerSinePositionEmbedding with Mask->One class OneFormerSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__( self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None ): super().__init__() if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") self.num_pos_feats = num_pos_feats self.temperature = temperature self.normalize = normalize self.scale = 2 * math.pi if scale is None else scale def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: if mask is None: mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) not_mask = (~mask).to(x.dtype) y_embed = not_mask.cumsum(1) x_embed = not_mask.cumsum(2) if self.normalize: eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=x.device).type_as(x) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.maskformer.modeling_maskformer.PredictionBlock class PredictionBlock(nn.Module): def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None: super().__init__() self.layers = [nn.Linear(in_dim, out_dim), activation] # Maintain submodule indexing as if part of a Sequential block for i, layer in enumerate(self.layers): self.add_module(str(i), layer) def forward(self, input: Tensor) -> Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class OneFormerTextMapperAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, 
proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim**-0.5 self.q_proj = nn.Linear(dim, dim, bias=qkv_bias) self.k_proj = nn.Linear(dim, dim, bias=qkv_bias) self.v_proj = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, q, k, v): batch_size, q_sequence_length, num_channels = q.shape if not k.shape == v.shape: raise ValueError(f"keys ({list(k.shape)}) and values ({list(v.shape)}) have different shapes!") batch_size, k_sequence_length, num_channels = k.shape q = self.q_proj(q).reshape(batch_size, q_sequence_length, self.num_heads, num_channels // self.num_heads) k = self.k_proj(k).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads) v = self.v_proj(v).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads) attn = torch.einsum("bnkc,bmkc->bknm", q, k) * self.scale attn = attn.softmax(dim=-1) output = torch.einsum("bknm,bmkc->bnkc", attn, v).reshape(batch_size, q_sequence_length, num_channels) output = self.proj(output) output = self.proj_drop(output) return output class OneFormerTextTransformerDecoderLayer(nn.Module): def __init__( self, d_model, nhead, dropout=0.1, layer_norm_eps=1e-05, ): super().__init__() self.self_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout) self.cross_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout) self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.mlp = nn.Sequential( nn.Linear(d_model, d_model * 4), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model * 4, d_model) ) def forward(self, hidden_state, mem): q = k = v = self.norm1(hidden_state) hidden_state = hidden_state + self.self_attn(q, k, v) q = self.norm2(hidden_state) hidden_state = hidden_state + self.cross_attn(q, mem, mem) hidden_state = hidden_state + self.dropout(self.mlp(self.norm3(hidden_state))) return hidden_state class OneFormerTextContextDecoder(nn.Module): def __init__( self, transformer_width=256, transformer_heads=4, transformer_layers=6, visual_dim=1024, dropout=0.1, layer_norm_eps=1e-05, **kwargs, ): super().__init__() self.memory_proj = nn.Sequential( nn.LayerNorm(visual_dim, eps=layer_norm_eps), nn.Linear(visual_dim, transformer_width), nn.LayerNorm(transformer_width, eps=layer_norm_eps), ) self.text_proj = nn.Sequential( nn.LayerNorm(visual_dim, eps=layer_norm_eps), nn.Linear(visual_dim, transformer_width), ) self.decoder = nn.ModuleList( [ OneFormerTextTransformerDecoderLayer(transformer_width, transformer_heads, dropout, layer_norm_eps) for _ in range(transformer_layers) ] ) self.out_proj = nn.Sequential( nn.LayerNorm(transformer_width, eps=layer_norm_eps), nn.Linear(transformer_width, visual_dim) ) def forward(self, text, visual): visual = self.memory_proj(visual) hidden_state = self.text_proj(text) for layer in self.decoder: hidden_state = layer(hidden_state, visual) return self.out_proj(hidden_state) class OneFormerTextMLP(nn.Module): def __init__( self, hidden_size: Optional[int] = None, intermediate_size: Optional[int] = None, output_size: Optional[int] = None, ): super().__init__() self.activation_fn = ACT2FN["quick_gelu"] hidden_size = 
hidden_size intermediate_size = intermediate_size output_size = output_size self.fc1 = nn.Linear(hidden_size, intermediate_size) self.fc2 = nn.Linear(intermediate_size, output_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class OneFormerTextTransformerLayer(nn.Module): def __init__(self, width: int, heads: int, attn_mask: torch.Tensor, layer_norm_eps=1e-05): super().__init__() self.self_attn = nn.MultiheadAttention(width, heads) self.layer_norm1 = nn.LayerNorm(width, eps=layer_norm_eps) self.mlp = OneFormerTextMLP(width, width * 4, width) self.layer_norm2 = nn.LayerNorm(width, eps=layer_norm_eps) self.attn_mask = attn_mask def forward( self, hidden_states: torch.Tensor, key_padding_mask: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states = self.self_attn( hidden_states, hidden_states, hidden_states, need_weights=False, key_padding_mask=key_padding_mask, )[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class OneFormerTextTransformer(nn.Module): def __init__( self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_checkpoint=False, layer_norm_eps=1e-05, ): super().__init__() self.width = width self.num_layers = layers self.layers = nn.Sequential( *[OneFormerTextTransformerLayer(width, heads, attn_mask, layer_norm_eps) for _ in range(layers)] ) self.use_checkpoint = use_checkpoint def forward(self, hidden_states: torch.Tensor): for layer in self.layers: if self.use_checkpoint: hidden_states = self._gradient_checkpointing_func(layer, hidden_states) else: hidden_states = layer(hidden_states) return hidden_states class OneFormerTextEncoder(nn.Module): def __init__( self, context_length: int, width: int, layers: int, vocab_size, use_checkpoint=False, layer_norm_eps=1e-05, ): super().__init__() heads = width // 64 self.context_length = context_length self.width = width self.transformer = OneFormerTextTransformer( width=width, layers=layers, heads=heads, attn_mask=self.build_attention_mask(), use_checkpoint=use_checkpoint, layer_norm_eps=layer_norm_eps, ) self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width)) self.ln_final = nn.LayerNorm(width, eps=layer_norm_eps) self.token_embedding = nn.Embedding(vocab_size, width) def build_attention_mask(self): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(self.context_length, self.context_length) mask.fill_(float("-inf")) mask.triu_(1) # zero out the lower diagonal return mask def forward(self, text): hidden_state = self.token_embedding(text) hidden_state = hidden_state + self.positional_embedding hidden_state = hidden_state.permute(1, 0, 2) hidden_state = self.transformer(hidden_state) hidden_state = hidden_state.permute(1, 0, 2) hidden_state = self.ln_final(hidden_state) hidden_state = hidden_state[torch.arange(hidden_state.shape[0]), text.argmax(dim=-1)] return hidden_state class OneFormerTextMapper(nn.Module): def __init__(self, config: OneFormerConfig): super().__init__() self.text_encoder = OneFormerTextEncoder( context_length=config.text_encoder_context_length, 
width=config.text_encoder_width, layers=config.text_encoder_num_layers, vocab_size=config.text_encoder_vocab_size, layer_norm_eps=config.layer_norm_eps, ) self.text_projector = OneFormerMLPPredictionHead( config.text_encoder_width, config.hidden_dim, config.hidden_dim, config.text_encoder_proj_layers, ) if config.text_encoder_n_ctx > 0: self.prompt_ctx = nn.Embedding( config.text_encoder_n_ctx, config.text_encoder_width, ) else: self.prompt_ctx = None def forward( self, inputs: Tensor, ) -> Tensor: text_queries = self.encode_text(inputs) return text_queries def encode_text(self, text): if text.ndim is None: raise ValueError("text must not be NoneType") if text.ndim not in [2, 3]: raise ValueError("Number of dimensions in text must be 2 or 3") squeeze_dim = False num_text = 1 if text.ndim == 3: num_text = text.shape[1] batch_size, num_text, hidden_dim = text.shape text = text.reshape(batch_size * num_text, hidden_dim) squeeze_dim = True # [batch_size, num_channels] encoded_text = self.text_encoder(text) text_queries = self.text_projector(encoded_text) if squeeze_dim: _, hidden_dim = text_queries.shape text_queries = text_queries.reshape(batch_size, num_text, hidden_dim) if self.prompt_ctx is not None: text_queries_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_queries.shape[0], 1, 1) text_queries = torch.cat([text_queries, text_queries_ctx], dim=1) return text_queries class OneFormerTaskModel(nn.Module): def __init__(self, config: OneFormerConfig): super().__init__() self.task_mlp = OneFormerMLPPredictionHead( config.task_seq_len, config.hidden_dim, config.hidden_dim, 2, ) def forward(self, inputs: Tensor) -> Tensor: task_tokens = self.task_mlp(inputs) return task_tokens ONEFORMER_START_DOCSTRING = r""" This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`OneFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ONEFORMER_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`OneFormerProcessor`]. See [`OneFormerProcessor.__call__`] for details. task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. See [`OneFormerProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of Detr's decoder attention layers. return_dict (`bool`, *optional*): Whether or not to return a [`~OneFormerModelOutput`] instead of a plain tuple. 
""" class OneFormerPreTrainedModel(PreTrainedModel): config_class = OneFormerConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module: nn.Module): xavier_std = self.config.init_xavier_std std = self.config.init_std if isinstance(module, OneFormerTransformerModule): if module.input_projections is not None: for input_projection in module.input_projections: if not isinstance(input_projection, nn.Sequential): nn.init.xavier_uniform_(input_projection.weight, gain=xavier_std) nn.init.constant_(input_projection.bias, 0) elif isinstance(module, OneFormerTransformerDecoder): nn.init.xavier_uniform_(module.query_input_projection.weight, gain=xavier_std) nn.init.constant_(module.query_input_projection.bias, 0) module.query_input_projection._is_hf_initialized = True elif isinstance(module, OneFormerPixelDecoderEncoderMultiscaleDeformableAttention): nn.init.constant_(module.sampling_offsets.weight.data, 0.0) thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(module.n_heads, 1, 1, 2) .repeat(1, module.n_levels, module.n_points, 1) ) for i in range(module.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(module.attention_weights.weight.data, 0.0) nn.init.constant_(module.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(module.value_proj.weight.data) nn.init.constant_(module.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(module.output_proj.weight.data) nn.init.constant_(module.output_proj.bias.data, 0.0) elif isinstance(module, OneFormerPixelDecoderEncoderOnly): for p in module.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) elif isinstance(module, OneFormerPixelDecoder): for p in module.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) nn.init.normal_(module.level_embed, std=0) elif isinstance(module, OneFormerTransformerDecoderSelfAttentionLayer): for p in module.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p, gain=xavier_std) elif isinstance(module, OneFormerTransformerDecoderCrossAttentionLayer): for p in module.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p, gain=xavier_std) elif isinstance(module, OneFormerTransformerDecoderFFNLayer): for p in module.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p, gain=xavier_std) elif isinstance(module, OneFormerTransformerDecoderQueryTransformer): for p in module.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p, gain=xavier_std) elif isinstance(module, OneFormerPixelLevelModule): for submodule in module.modules(): if isinstance(submodule, (nn.Conv2d, nn.Linear)): submodule.weight.data.normal_(mean=0.0, std=std) if submodule.bias is not None: submodule.bias.data.zero_() elif isinstance(module, OneFormerTextContextDecoder): for submodule in module.modules(): if isinstance(submodule, nn.Linear): nn.init.trunc_normal_(submodule.weight, std=0.02) if isinstance(submodule, nn.Linear) and submodule.bias is not None: nn.init.constant_(submodule.bias, 0) elif isinstance(submodule, nn.LayerNorm): nn.init.constant_(submodule.bias, 0) nn.init.constant_(submodule.weight, 1.0) elif isinstance(module, OneFormerTextTransformer): proj_std = (module.width**-0.5) * ((2 * module.num_layers) ** -0.5) attn_std = module.width**-0.5 fc_std = (2 * module.width) ** -0.5 for layer in module.layers: 
nn.init.normal_(layer.self_attn.in_proj_weight, std=attn_std) nn.init.normal_(layer.self_attn.out_proj.weight, std=proj_std) nn.init.normal_(layer.mlp.fc1.weight, std=fc_std) nn.init.normal_(layer.mlp.fc2.weight, std=proj_std) elif isinstance(module, OneFormerTextEncoder): nn.init.normal_(module.token_embedding.weight, std=0.02) nn.init.normal_(module.positional_embedding, std=0.01) if hasattr(module, "reference_points"): nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) elif isinstance(module, OneFormerTaskModel): for submodule in module.modules(): if isinstance(module, OneFormerMLPPredictionHead): for submodule in module.modules(): if isinstance(submodule, nn.Linear): nn.init.xavier_uniform_(submodule.weight, gain=xavier_std) nn.init.constant_(submodule.bias, 0) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.MultiheadAttention): module.in_proj_weight.data.normal_(mean=0.0, std=std) module.in_proj_bias.data.zero_() elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @add_start_docstrings( "The bare OneFormer Model outputting raw hidden-states without any specific head on top.", ONEFORMER_START_DOCSTRING, ) class OneFormerModel(OneFormerPreTrainedModel): main_input_name = ["pixel_values", "task_inputs"] def __init__(self, config: OneFormerConfig): super().__init__(config) self.pixel_level_module = OneFormerPixelLevelModule(config) self.transformer_module = OneFormerTransformerModule(in_features=config.conv_dim, config=config) self.task_encoder = OneFormerTaskModel(config) self.is_training = config.is_training if self.is_training: self.text_mapper = OneFormerTextMapper(config) else: self.text_mapper = None self.post_init() @add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=OneFormerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor] = None, pixel_mask: Optional[Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> OneFormerModelOutput: r""" Returns: `OneFormerModelOutput` Example: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import OneFormerProcessor, OneFormerModel >>> # download texting image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> # load processor for preprocessing the inputs >>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") >>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") >>> inputs = processor(image, ["semantic"], return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> mask_predictions = outputs.transformer_decoder_mask_predictions >>> class_predictions = outputs.transformer_decoder_class_predictions >>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}" '👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]' ```""" if pixel_values is None: raise ValueError("You have to specify pixel_values") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, _, height, width = pixel_values.shape if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device) pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states) multi_scale_features = pixel_level_module_output.decoder_features mask_features = pixel_level_module_output.decoder_last_feature task_token = self.task_encoder(task_inputs.to(self.dtype)) if self.is_training: text_queries = self.text_mapper(text_inputs) else: text_queries = None transformer_module_output = self.transformer_module( multi_scale_features=multi_scale_features, mask_features=mask_features, task_token=task_token, output_attentions=output_attentions, ) queries = transformer_module_output.object_queries encoder_hidden_states = None pixel_decoder_hidden_states = None transformer_decoder_hidden_states = None if output_hidden_states: encoder_hidden_states = pixel_level_module_output.encoder_features pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,) for f in pixel_level_module_output.decoder_features: pixel_decoder_hidden_states += (f,) transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions output = OneFormerModelOutput( encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, transformer_decoder_object_queries=queries, transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits, transformer_decoder_mask_predictions=transformer_module_output.prediction_masks, transformer_decoder_class_predictions=transformer_module_output.prediction_class, transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions, text_queries=text_queries, task_token=task_token, attentions=transformer_module_output.attentions, ) if not return_dict: output = tuple(v for v in output.values()) return output @add_start_docstrings( "OneFormer Model for instance, semantic and panoptic image segmentation.", ONEFORMER_START_DOCSTRING, ) class OneFormerForUniversalSegmentation(OneFormerPreTrainedModel): main_input_name = ["pixel_values", "task_inputs"] def __init__(self, config: OneFormerConfig): super().__init__(config) self.model = OneFormerModel(config) self.matcher = OneFormerHungarianMatcher( cost_class=config.class_weight, cost_dice=config.dice_weight, cost_mask=config.mask_weight, num_points=config.train_num_points, ) self.weight_dict: Dict[str, float] = { "loss_cross_entropy": config.class_weight, "loss_mask": config.mask_weight, "loss_dice": config.dice_weight, "loss_contrastive": config.contrastive_weight, } self.criterion = OneFormerLoss( num_classes=config.num_labels, matcher=self.matcher, 
weight_dict=self.weight_dict, eos_coef=config.no_object_weight, num_points=config.train_num_points, oversample_ratio=config.oversample_ratio, importance_sample_ratio=config.importance_sample_ratio, contrastive_temperature=config.contrastive_temperature, ) self.post_init() def get_loss_dict( self, masks_queries_logits: Tensor, class_queries_logits: Tensor, contrastive_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, text_queries: Tensor, auxiliary_predictions: Dict[str, Tensor], calculate_contrastive_loss: bool, ) -> Dict[str, Tensor]: loss_dict: Dict[str, Tensor] = self.criterion( masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, contrastive_queries_logits=contrastive_queries_logits, mask_labels=mask_labels, class_labels=class_labels, text_queries=text_queries, auxiliary_predictions=auxiliary_predictions, calculate_contrastive_loss=calculate_contrastive_loss, ) # weight each loss by `self.weight_dict[<LOSS_NAME>]` including auxiliary losses for key, weight in self.weight_dict.items(): for loss_key, loss in loss_dict.items(): if key in loss_key: loss *= weight return loss_dict def get_loss(self, loss_dict: Dict[str, Tensor]) -> Tensor: return sum(loss_dict.values()) @add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=OneFormerForUniversalSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor] = None, mask_labels: Optional[List[Tensor]] = None, class_labels: Optional[List[Tensor]] = None, pixel_mask: Optional[Tensor] = None, output_auxiliary_logits: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> OneFormerForUniversalSegmentationOutput: r""" text_inputs (`List[torch.Tensor]`, *optional*): Tensor fof shape `(num_queries, sequence_length)` to be fed to a model mask_labels (`List[torch.Tensor]`, *optional*): List of mask labels of shape `(num_labels, height, width)` to be fed to a model class_labels (`List[torch.LongTensor]`, *optional*): list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. Returns: `OneFormerUniversalSegmentationOutput` Example: Universal segmentation example: ```python >>> from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation >>> from PIL import Image >>> import requests >>> import torch >>> # load OneFormer fine-tuned on ADE20k for universal segmentation >>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") >>> model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") >>> url = ( ... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" ... ) >>> image = Image.open(requests.get(url, stream=True).raw) >>> # Semantic Segmentation >>> inputs = processor(image, ["semantic"], return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)` >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` >>> class_queries_logits = outputs.class_queries_logits >>> masks_queries_logits = outputs.masks_queries_logits >>> # you can pass them to processor for semantic postprocessing >>> predicted_semantic_map = processor.post_process_semantic_segmentation( ... outputs, target_sizes=[image.size[::-1]] ... )[0] >>> f"👉 Semantic Predictions Shape: {list(predicted_semantic_map.shape)}" '👉 Semantic Predictions Shape: [512, 683]' >>> # Instance Segmentation >>> inputs = processor(image, ["instance"], return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)` >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` >>> class_queries_logits = outputs.class_queries_logits >>> masks_queries_logits = outputs.masks_queries_logits >>> # you can pass them to processor for instance postprocessing >>> predicted_instance_map = processor.post_process_instance_segmentation( ... outputs, target_sizes=[image.size[::-1]] ... )[0]["segmentation"] >>> f"👉 Instance Predictions Shape: {list(predicted_instance_map.shape)}" '👉 Instance Predictions Shape: [512, 683]' >>> # Panoptic Segmentation >>> inputs = processor(image, ["panoptic"], return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)` >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` >>> class_queries_logits = outputs.class_queries_logits >>> masks_queries_logits = outputs.masks_queries_logits >>> # you can pass them to processor for panoptic postprocessing >>> predicted_panoptic_map = processor.post_process_panoptic_segmentation( ... outputs, target_sizes=[image.size[::-1]] ... 
)[0]["segmentation"] >>> f"👉 Panoptic Predictions Shape: {list(predicted_panoptic_map.shape)}" '👉 Panoptic Predictions Shape: [512, 683]' ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( pixel_values=pixel_values, task_inputs=task_inputs, text_inputs=text_inputs, pixel_mask=pixel_mask, output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, output_attentions=output_attentions, return_dict=True, ) loss, loss_dict, auxiliary_predictions = None, None, None class_queries_logits = outputs.transformer_decoder_class_predictions masks_queries_logits = outputs.transformer_decoder_mask_predictions contrastive_queries_logits = outputs.transformer_decoder_contrastive_queries auxiliary_predictions = outputs.transformer_decoder_auxiliary_predictions text_queries = outputs.text_queries if mask_labels is not None and class_labels is not None: loss_dict: Dict[str, Tensor] = self.get_loss_dict( masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, contrastive_queries_logits=contrastive_queries_logits, mask_labels=mask_labels, class_labels=class_labels, text_queries=text_queries, auxiliary_predictions=auxiliary_predictions, calculate_contrastive_loss=self.config.contrastive_temperature is not None, ) loss = self.get_loss(loss_dict) output_auxiliary_logits = ( self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits ) if not output_auxiliary_logits: auxiliary_predictions = None output = OneFormerForUniversalSegmentationOutput( class_queries_logits=class_queries_logits, masks_queries_logits=masks_queries_logits, auxiliary_predictions=auxiliary_predictions, loss=loss, **outputs, ) if not return_dict: output = tuple(v for v in output.values()) if loss is not None: output = (loss) + output return output
transformers/src/transformers/models/oneformer/modeling_oneformer.py/0
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OWLv2 model configuration""" import os from typing import TYPE_CHECKING, Dict, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/owlv2-base-patch16": "https://huggingface.co/google/owlv2-base-patch16/resolve/main/config.json", } # Copied from transformers.models.owlvit.configuration_owlvit.OwlViTTextConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 class Owlv2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`Owlv2TextModel`]. It is used to instantiate an Owlv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Owlv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWLv2 text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Owlv2TextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. Example: ```python >>> from transformers import Owlv2TextConfig, Owlv2TextModel >>> # Initializing a Owlv2TextModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2TextConfig() >>> # Initializing a Owlv2TextConfig from the google/owlv2-base-patch16 style configuration >>> model = Owlv2TextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "owlv2_text_model" def __init__( self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the text config dict if we are loading from Owlv2Config if config_dict.get("model_type") == "owlv2": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) # Copied from transformers.models.owlvit.configuration_owlvit.OwlViTVisionConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2, 32->16 class Owlv2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`Owlv2VisionModel`]. It is used to instantiate an OWLv2 image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import Owlv2VisionConfig, Owlv2VisionModel >>> # Initializing a Owlv2VisionModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2VisionConfig() >>> # Initializing a Owlv2VisionModel model from the google/owlv2-base-patch16 style configuration >>> model = Owlv2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "owlv2_vision_model" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from Owlv2Config if config_dict.get("model_type") == "owlv2": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(config_dict, **kwargs) # Copied from transformers.models.owlvit.configuration_owlvit.OwlViTConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 class Owlv2Config(PretrainedConfig): r""" [`Owlv2Config`] is the configuration class to store the configuration of an [`Owlv2Model`]. It is used to instantiate an OWLv2 model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Owlv2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Owlv2VisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The inital value of the *logit_scale* parameter. Default is used as per the original OWLv2 implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. """ model_type = "owlv2" def __init__( self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("text_config is None. Initializing the Owlv2TextConfig with default values.") if vision_config is None: vision_config = {} logger.info("vision_config is None. initializing the Owlv2VisionConfig with default values.") self.text_config = Owlv2TextConfig(**text_config) self.vision_config = Owlv2VisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.return_dict = return_dict self.initializer_factor = 1.0 @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) @classmethod def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs): r""" Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision model configuration. Returns: [`Owlv2Config`]: An instance of a configuration object """ config_dict = {} config_dict["text_config"] = text_config config_dict["vision_config"] = vision_config return cls.from_dict(config_dict, **kwargs)
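# --------------------------------------------------------------------------------------------
# Editorial sketch (not part of the upstream file): composing an `Owlv2Config` from the two
# sub-configs defined above. `from_text_vision_configs` expects plain dicts, so the sub-configs
# are serialized with `to_dict()` first. The hyper-parameter values are arbitrary examples.
# Guarded so it never runs on import.
# --------------------------------------------------------------------------------------------
if __name__ == "__main__":
    text_config = Owlv2TextConfig(hidden_size=512, num_hidden_layers=12)
    vision_config = Owlv2VisionConfig(hidden_size=768, patch_size=16)

    config = Owlv2Config.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
    print(config.text_config.hidden_size, config.vision_config.patch_size)  # 512 16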
transformers/src/transformers/models/owlv2/configuration_owlv2.py/0
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PatchTST model configuration""" from typing import List, Optional, Union from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP = { "ibm/patchtst-base": "https://huggingface.co/ibm/patchtst-base/resolve/main/config.json", # See all PatchTST models at https://huggingface.co/ibm/models?filter=patchtst } class PatchTSTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`PatchTSTModel`]. It is used to instantiate an PatchTST model according to the specified arguments, defining the model architecture. [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_input_channels (`int`, *optional*, defaults to 1): The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of multivariate targets. context_length (`int`, *optional*, defaults to 32): The context length of the input sequence. distribution_output (`str`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". loss (`str`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". patch_length (`int`, *optional*, defaults to 1): Define the patch length of the patchification process. patch_stride (`int`, *optional*, defaults to 1): Define the stride of the patchification process. num_hidden_layers (`int`, *optional*, defaults to 3): Number of hidden layers. d_model (`int`, *optional*, defaults to 128): Dimensionality of the transformer layers. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. share_embedding (`bool`, *optional*, defaults to `True`): Sharing the input embedding across all channels. channel_attention (`bool`, *optional*, defaults to `False`): Activate channel attention block in the Transformer to allow channels to attend each other. ffn_dim (`int`, *optional*, defaults to 512): Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. norm_type (`str` , *optional*, defaults to `"batchnorm"`): Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. 
attention_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the attention probabilities. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the Transformer. positional_dropout (`float`, *optional*, defaults to 0.0): The dropout probability in the positional embedding layer. path_dropout (`float`, *optional*, defaults to 0.0): The dropout path in the residual block. ff_dropout (`float`, *optional*, defaults to 0.0): The dropout probability used between the two layers of the feed-forward networks. bias (`bool`, *optional*, defaults to `True`): Whether to add bias in the feed-forward networks. activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported. pre_norm (`bool`, *optional*, defaults to `True`): Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is applied after residual block. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. use_cls_token (`bool`, *optional*, defaults to `False`): Whether cls token is used. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. share_projection (`bool`, *optional*, defaults to `True`): Sharing the projection layer across different channels in the forecast head. scaling (`Union`, *optional*, defaults to `"std"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". do_mask_input (`bool`, *optional*): Apply masking during the pretraining. mask_type (`str`, *optional*, defaults to `"random"`): Masking type. Only `"random"` and `"forecast"` are currently supported. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio applied to mask the input data during random pretraining. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. channel_consistent_masking (`bool`, *optional*, defaults to `False`): If channel consistent masking is True, all the channels will have the same masking pattern. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked during pretraining. Values in the list are number between 1 and `num_input_channels` mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. pooling_type (`str`, *optional*, defaults to `"mean"`): Pooling of the embedding. `"mean"`, `"max"` and `None` are supported. head_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for head. prediction_length (`int`, *optional*, defaults to 24): The prediction horizon that the model will output. num_targets (`int`, *optional*, defaults to 1): Number of targets for regression and classification tasks. For classification, it is the number of classes. output_range (`list`, *optional*): Output range for regression task. The range of output values can be set to enforce the model to produce values within a range. 
num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples is generated in parallel for probabilistic prediction. ```python >>> from transformers import PatchTSTConfig, PatchTSTModel >>> # Initializing an PatchTST configuration with 12 time steps for prediction >>> configuration = PatchTSTConfig(prediction_length=12) >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtst" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_attention_heads", "num_hidden_layers": "num_hidden_layers", } def __init__( self, # time series specific configuration num_input_channels: int = 1, context_length: int = 32, distribution_output: str = "student_t", loss: str = "mse", # PatchTST arguments patch_length: int = 1, patch_stride: int = 1, # Transformer architecture configuration num_hidden_layers: int = 3, d_model: int = 128, num_attention_heads: int = 4, share_embedding: bool = True, channel_attention: bool = False, ffn_dim: int = 512, norm_type: str = "batchnorm", norm_eps: float = 1e-05, attention_dropout: float = 0.0, dropout: float = 0.0, positional_dropout: float = 0.0, path_dropout: float = 0.0, ff_dropout: float = 0.0, bias: bool = True, activation_function: str = "gelu", pre_norm: bool = True, positional_encoding_type: str = "sincos", use_cls_token: bool = False, init_std: float = 0.02, share_projection: bool = True, scaling: Optional[Union[str, bool]] = "std", # mask pretraining do_mask_input: Optional[bool] = None, mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[List[int], int]] = [2], channel_consistent_masking: Optional[bool] = False, unmasked_channel_indices: Optional[List[int]] = None, mask_value: int = 0, # head pooling_type: str = "mean", head_dropout: float = 0.0, prediction_length: int = 24, num_targets: int = 1, output_range: Optional[List] = None, # distribution head num_parallel_samples: int = 100, **kwargs, ): # time series specific configuration self.context_length = context_length self.num_input_channels = num_input_channels # n_vars self.loss = loss self.distribution_output = distribution_output self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.d_model = d_model self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.dropout = dropout self.attention_dropout = attention_dropout self.share_embedding = share_embedding self.channel_attention = channel_attention self.norm_type = norm_type self.norm_eps = norm_eps self.positional_dropout = positional_dropout self.path_dropout = path_dropout self.ff_dropout = ff_dropout self.bias = bias self.activation_function = activation_function self.pre_norm = pre_norm self.positional_encoding_type = positional_encoding_type self.use_cls_token = use_cls_token self.init_std = init_std self.scaling = scaling # PatchTST parameters self.patch_length = patch_length self.patch_stride = patch_stride # Mask pretraining self.do_mask_input = do_mask_input self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio # for random masking self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking self.channel_consistent_masking = channel_consistent_masking self.unmasked_channel_indices = unmasked_channel_indices self.mask_value = mask_value # general head params 
self.pooling_type = pooling_type self.head_dropout = head_dropout # For prediction head self.share_projection = share_projection self.prediction_length = prediction_length # For prediction and regression head self.num_parallel_samples = num_parallel_samples # Regression self.num_targets = num_targets self.output_range = output_range super().__init__(**kwargs)
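# --------------------------------------------------------------------------------------------
# Editorial sketch (not part of the upstream file): how `context_length`, `patch_length` and
# `patch_stride` interact. In the usual sliding-window sense, a context of C steps split into
# patches of length P with stride S yields (C - P) // S + 1 patches (assuming C >= P); the
# values below are arbitrary examples. Guarded so it never runs on import.
# --------------------------------------------------------------------------------------------
if __name__ == "__main__":
    config = PatchTSTConfig(context_length=32, patch_length=8, patch_stride=8)
    num_patches = (config.context_length - config.patch_length) // config.patch_stride + 1
    print(num_patches)  # 4 non-overlapping patches of 8 time steps each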
transformers/src/transformers/models/patchtst/configuration_patchtst.py/0
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import torch from flax.traverse_util import flatten_dict from t5x import checkpoints from transformers import ( AutoTokenizer, Pix2StructConfig, Pix2StructForConditionalGeneration, Pix2StructImageProcessor, Pix2StructProcessor, Pix2StructTextConfig, Pix2StructVisionConfig, ) def get_flax_param(t5x_checkpoint_path): flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) flax_params = flatten_dict(flax_params) return flax_params def rename_and_convert_flax_params(flax_dict): converted_dict = {} CONVERSION_MAPPING = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } DECODER_CONVERSION_MAPPING = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key new_key = ".".join(key[1:]) # rename the key for old, new in CONVERSION_MAPPING.items(): new_key = new_key.replace(old, new) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): new_key = new_key.replace(old, new) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) new_key = new_key.replace("encoder", "encoder.encoder") elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) converted_dict[new_key] = flax_dict[key] converted_torch_dict = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T) else: converted_torch_dict[key] = torch.from_numpy(converted_dict[key]) return converted_torch_dict def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False ): flax_params = get_flax_param(t5x_checkpoint_path) if not use_large: encoder_config = Pix2StructVisionConfig() decoder_config = Pix2StructTextConfig() else: encoder_config = 
Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA (visual question answering) model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
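# --------------------------------------------------------------------------------------------
# Editorial note (not part of the upstream file): a typical invocation of this script. The
# checkpoint and output paths below are placeholders; `--use_large` and `--is_vqa` are only
# needed for the large and VQA-finetuned variants, respectively.
#
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large \
#       --is_vqa
# --------------------------------------------------------------------------------------------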
transformers/src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py/0
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pop2Piano model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP = { "sweetcocoa/pop2piano": "https://huggingface.co/sweetcocoa/pop2piano/blob/main/config.json" } class Pop2PianoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 2400): Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`]. composer_vocab_size (`int`, *optional*, defaults to 21): Denotes the number of composers. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `Pop2PianoBlock`. num_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`. """ model_type = "pop2piano" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=2400, composer_vocab_size=21, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", # noqa is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, dense_act_fn="relu", **kwargs, ): self.vocab_size = vocab_size self.composer_vocab_size = composer_vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache self.dense_act_fn = dense_act_fn self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated" self.hidden_size = self.d_model self.num_attention_heads = num_heads self.num_hidden_layers = num_layers super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
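# --------------------------------------------------------------------------------------------
# Editorial sketch (not part of the upstream file): the derived attributes set in `__init__`
# above. `feed_forward_proj="gated-gelu"` switches on the gated activation flag, while
# `hidden_size`, `num_attention_heads` and `num_hidden_layers` simply alias the T5-style
# `d_model`, `num_heads` and `num_layers`. Values below are the defaults. Guarded so it never
# runs on import.
# --------------------------------------------------------------------------------------------
if __name__ == "__main__":
    config = Pop2PianoConfig(d_model=512, num_heads=8, num_layers=6, feed_forward_proj="gated-gelu")
    print(config.is_gated_act)         # True
    print(config.hidden_size)          # 512 (alias of d_model)
    print(config.num_attention_heads)  # 8
    print(config.num_hidden_layers)    # 6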
transformers/src/transformers/models/pop2piano/configuration_pop2piano.py/0
# coding=utf-8 # Copyright 2022 The REALM authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch REALM model.""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_realm import RealmConfig logger = logging.get_logger(__name__) _EMBEDDER_CHECKPOINT_FOR_DOC = "google/realm-cc-news-pretrained-embedder" _ENCODER_CHECKPOINT_FOR_DOC = "google/realm-cc-news-pretrained-encoder" _SCORER_CHECKPOINT_FOR_DOC = "google/realm-cc-news-pretrained-scorer" _CONFIG_FOR_DOC = "RealmConfig" REALM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/realm-cc-news-pretrained-embedder", "google/realm-cc-news-pretrained-encoder", "google/realm-cc-news-pretrained-scorer", "google/realm-cc-news-pretrained-openqa", "google/realm-orqa-nq-openqa", "google/realm-orqa-nq-reader", "google/realm-orqa-wq-openqa", "google/realm-orqa-wq-reader", # See all REALM models at https://huggingface.co/models?filter=realm ] def load_tf_weights_in_realm(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): if isinstance(model, RealmReader) and "reader" not in name: logger.info(f"Skipping {name} as it is not {model.__class__.__name__}'s parameter") continue # For pretrained openqa reader if (name.startswith("bert") or name.startswith("cls")) and isinstance(model, RealmForOpenQA): name = name.replace("bert/", "reader/realm/") name = name.replace("cls/", "reader/cls/") # For pretrained encoder if (name.startswith("bert") or name.startswith("cls")) and isinstance(model, RealmKnowledgeAugEncoder): name = name.replace("bert/", "realm/") # For finetuned reader if name.startswith("reader"): reader_prefix = "" if isinstance(model, RealmReader) else "reader/" name = name.replace("reader/module/bert/", f"{reader_prefix}realm/") name = name.replace("reader/module/cls/", f"{reader_prefix}cls/") name = name.replace("reader/dense/", f"{reader_prefix}qa_outputs/dense_intermediate/") name = name.replace("reader/dense_1/", f"{reader_prefix}qa_outputs/dense_output/") name = name.replace("reader/layer_normalization", f"{reader_prefix}qa_outputs/layer_normalization") # For embedder and scorer if name.startswith("module/module/module/"): # finetuned embedder_prefix = "" if isinstance(model, RealmEmbedder) else "embedder/" name = name.replace("module/module/module/module/bert/", f"{embedder_prefix}realm/") name = name.replace("module/module/module/LayerNorm/", f"{embedder_prefix}cls/LayerNorm/") name = name.replace("module/module/module/dense/", f"{embedder_prefix}cls/dense/") name = name.replace("module/module/module/module/cls/predictions/", f"{embedder_prefix}cls/predictions/") name = name.replace("module/module/module/bert/", f"{embedder_prefix}realm/") name = name.replace("module/module/module/cls/predictions/", f"{embedder_prefix}cls/predictions/") elif name.startswith("module/module/"): # pretrained embedder_prefix = "" if isinstance(model, RealmEmbedder) else "embedder/" name = name.replace("module/module/LayerNorm/", f"{embedder_prefix}cls/LayerNorm/") name = name.replace("module/module/dense/", f"{embedder_prefix}cls/dense/") name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape 
{pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->Realm class RealmEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Realm class RealmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = 
int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
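        # Illustrative shape note (not enforced here): query_layer is
        # [batch_size, num_attention_heads, query_length, attention_head_size] and key_layer is
        # [batch_size, num_attention_heads, key_length, attention_head_size], so the matmul below yields
        # attention_scores of shape [batch_size, num_attention_heads, query_length, key_length] --
        # one raw (unnormalized) score for every query/key position pair in every head.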
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RealmModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
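        # Because dropout acts on the softmax output, a dropped entry removes one key position's
        # contribution for that query during training (the remaining row is rescaled by nn.Dropout,
        # not re-normalized); at evaluation time nn.Dropout is a no-op.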
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Realm class RealmSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Realm class RealmAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = RealmSelfAttention(config, position_embedding_type=position_embedding_type) self.output = RealmSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Realm class RealmIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from 
transformers.models.bert.modeling_bert.BertOutput with Bert->Realm class RealmOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Realm class RealmLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RealmAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = RealmAttention(config, position_embedding_type="absolute") self.intermediate = RealmIntermediate(config) self.output = RealmOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if 
self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Realm class RealmEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([RealmLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Realm class RealmPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
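        # With hidden_states of shape [batch_size, seq_length, hidden_size], the slice below keeps only the
        # first ([CLS]) token, giving a [batch_size, hidden_size] tensor that is then passed through the
        # dense layer and tanh activation to produce the pooled output.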
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @dataclass class RealmEmbedderOutput(ModelOutput): """ Outputs of [`RealmEmbedder`] models. Args: projected_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Projected score. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_score: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class RealmScorerOutput(ModelOutput): """ Outputs of [`RealmScorer`] models. Args: relevance_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates)`): The relevance score of document candidates (before softmax). query_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Query score derived from the query embedder. candidate_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates, config.retriever_proj_size)`): Candidate score derived from the embedder. """ relevance_score: torch.FloatTensor = None query_score: torch.FloatTensor = None candidate_score: torch.FloatTensor = None @dataclass class RealmReaderOutput(ModelOutput): """ Outputs of [`RealmReader`] models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Total loss. retriever_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Retriever loss. reader_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Reader loss. retriever_correct (`torch.BoolTensor` of shape `(config.searcher_beam_size,)`, *optional*): Whether or not an evidence block contains answer. reader_correct (`torch.BoolTensor` of shape `(config.reader_beam_size, num_candidates)`, *optional*): Whether or not a span candidate contains answer. block_idx (`torch.LongTensor` of shape `()`): The index of the retrieved evidence block in which the predicted answer is most likely. candidate (`torch.LongTensor` of shape `()`): The index of the retrieved span candidates in which the predicted answer is most likely. start_pos (`torch.IntTensor` of shape `()`): Predicted answer starting position in *RealmReader*'s inputs. end_pos (`torch.IntTensor` of shape `()`): Predicted answer ending position in *RealmReader*'s inputs. 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: torch.FloatTensor = None retriever_loss: torch.FloatTensor = None reader_loss: torch.FloatTensor = None retriever_correct: torch.BoolTensor = None reader_correct: torch.BoolTensor = None block_idx: torch.LongTensor = None candidate: torch.LongTensor = None start_pos: torch.int32 = None end_pos: torch.int32 = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class RealmForOpenQAOutput(ModelOutput): """ Outputs of [`RealmForOpenQA`] models. Args: reader_output (`dict`): Reader output. predicted_answer_ids (`torch.LongTensor` of shape `(answer_sequence_length)`): Predicted answer ids. """ reader_output: dict = None predicted_answer_ids: torch.LongTensor = None class RealmPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class RealmLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = RealmPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
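        # The decoder below maps [..., hidden_size] activations to [..., vocab_size] logits. As the
        # surrounding comment notes, its weight is shared with the input word embeddings (handled by the
        # generic PreTrainedModel weight-tying machinery together with `get_output_embeddings` /
        # `_tied_weights_keys` further down), which is why it is created with bias=False and the bias is
        # kept as a separate, resizable parameter.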
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class RealmOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = RealmLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class RealmScorerProjection(nn.Module): def __init__(self, config): super().__init__() self.predictions = RealmLMPredictionHead(config) self.dense = nn.Linear(config.hidden_size, config.retriever_proj_size) self.LayerNorm = nn.LayerNorm(config.retriever_proj_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class RealmReaderProjection(nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense_intermediate = nn.Linear(config.hidden_size, config.span_hidden_size * 2) self.dense_output = nn.Linear(config.span_hidden_size, 1) self.layer_normalization = nn.LayerNorm(config.span_hidden_size, eps=config.reader_layer_norm_eps) self.relu = nn.ReLU() def forward(self, hidden_states, block_mask): def span_candidates(masks): """ Generate span candidates. Args: masks: <bool> [num_retrievals, max_sequence_len] Returns: starts: <int32> [num_spans] ends: <int32> [num_spans] span_masks: <int32> [num_retrievals, num_spans] whether spans locate in evidence block. 
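                Example (illustrative): with config.max_span_width=2 and max_sequence_len=4, the
                candidates come out as starts=[0, 1, 2, 3, 0, 1, 2] and ends=[0, 1, 2, 3, 1, 2, 3],
                i.e. all width-1 spans followed by all width-2 spans.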
""" _, max_sequence_len = masks.shape def _spans_given_width(width): current_starts = torch.arange(max_sequence_len - width + 1, device=masks.device) current_ends = torch.arange(width - 1, max_sequence_len, device=masks.device) return current_starts, current_ends starts, ends = zip(*(_spans_given_width(w + 1) for w in range(self.config.max_span_width))) # [num_spans] starts = torch.cat(starts, 0) ends = torch.cat(ends, 0) # [num_retrievals, num_spans] start_masks = torch.index_select(masks, dim=-1, index=starts) end_masks = torch.index_select(masks, dim=-1, index=ends) span_masks = start_masks * end_masks return starts, ends, span_masks def mask_to_score(mask, dtype=torch.float32): return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min # [reader_beam_size, max_sequence_len, span_hidden_size * 2] hidden_states = self.dense_intermediate(hidden_states) # [reader_beam_size, max_sequence_len, span_hidden_size] start_projection, end_projection = hidden_states.chunk(2, dim=-1) candidate_starts, candidate_ends, candidate_mask = span_candidates(block_mask) candidate_start_projections = torch.index_select(start_projection, dim=1, index=candidate_starts) candidate_end_projections = torch.index_select(end_projection, dim=1, index=candidate_ends) candidate_hidden = candidate_start_projections + candidate_end_projections # [reader_beam_size, num_candidates, span_hidden_size] candidate_hidden = self.relu(candidate_hidden) # [reader_beam_size, num_candidates, span_hidden_size] candidate_hidden = self.layer_normalization(candidate_hidden) # [reader_beam_size, num_candidates] reader_logits = self.dense_output(candidate_hidden).squeeze(-1) # [reader_beam_size, num_candidates] reader_logits += mask_to_score(candidate_mask, dtype=reader_logits.dtype) return reader_logits, candidate_starts, candidate_ends REALM_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RealmConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ REALM_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class RealmPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RealmConfig load_tf_weights = load_tf_weights_in_realm base_model_prefix = "realm" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _flatten_inputs(self, *inputs): """Flatten inputs' shape to (-1, input_shape[-1])""" flattened_inputs = [] for tensor in inputs: if tensor is None: flattened_inputs.append(None) else: input_shape = tensor.shape if len(input_shape) > 2: tensor = tensor.view((-1, input_shape[-1])) flattened_inputs.append(tensor) return flattened_inputs class RealmBertModel(RealmPreTrainedModel): """ Same as the original BertModel but remove docstrings. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = RealmEmbeddings(config) self.encoder = RealmEncoder(config) self.pooler = RealmPooler(config) if add_pooling_layer else None # Weights initialization is mostly managed by other Realm models, # but we also have them initialized here to keep a consistency. self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
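        For example, passing {0: [0, 1], 2: [2, 3]} prunes heads 0 and 1 of layer 0 and heads 2 and 3 of
        layer 2 (layers and heads are both 0-indexed).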
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
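        # get_extended_attention_mask converts the [batch_size, seq_length] padding mask of 1s (keep) and
        # 0s (pad) into a broadcastable [batch_size, 1, 1, seq_length] tensor whose masked positions hold a
        # large negative value, so adding it to the attention scores suppresses them after the softmax
        # (a user-provided 3D mask is expanded analogously).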
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( "The embedder of REALM outputting projected score that will be used to calculate relevance score.", REALM_START_DOCSTRING, ) class RealmEmbedder(RealmPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.realm = RealmBertModel(self.config) self.cls = RealmScorerProjection(self.config) self.post_init() def get_input_embeddings(self): return self.realm.embeddings.word_embeddings def set_input_embeddings(self, value): self.realm.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=RealmEmbedderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, RealmEmbedderOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, RealmEmbedder >>> import torch >>> tokenizer = 
AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-embedder") >>> model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> projected_score = outputs.projected_score ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict realm_outputs = self.realm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # [batch_size, hidden_size] pooler_output = realm_outputs[1] # [batch_size, retriever_proj_size] projected_score = self.cls(pooler_output) if not return_dict: return (projected_score,) + realm_outputs[2:4] else: return RealmEmbedderOutput( projected_score=projected_score, hidden_states=realm_outputs.hidden_states, attentions=realm_outputs.attentions, ) @add_start_docstrings( "The scorer of REALM outputting relevance scores representing the score of document candidates (before softmax).", REALM_START_DOCSTRING, ) class RealmScorer(RealmPreTrainedModel): r""" Args: query_embedder ([`RealmEmbedder`]): Embedder for input sequences. If not specified, it will use the same embedder as candidate sequences. """ def __init__(self, config, query_embedder=None): super().__init__(config) self.embedder = RealmEmbedder(self.config) self.query_embedder = query_embedder if query_embedder is not None else self.embedder self.post_init() @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=RealmScorerOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, candidate_input_ids: Optional[torch.LongTensor] = None, candidate_attention_mask: Optional[torch.FloatTensor] = None, candidate_token_type_ids: Optional[torch.LongTensor] = None, candidate_inputs_embeds: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, RealmScorerOutput]: r""" candidate_input_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`): Indices of candidate input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) candidate_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) candidate_token_type_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. 
[What are token type IDs?](../glossary#token-type-ids) candidate_inputs_embeds (`torch.FloatTensor` of shape `(batch_size * num_candidates, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `candidate_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *candidate_input_ids* indices into associated vectors than the model's internal embedding lookup matrix. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, RealmScorer >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-scorer") >>> model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=2) >>> # batch_size = 2, num_candidates = 2 >>> input_texts = ["How are you?", "What is the item in the picture?"] >>> candidates_texts = [["Hello world!", "Nice to meet you!"], ["A cute cat.", "An adorable dog."]] >>> inputs = tokenizer(input_texts, return_tensors="pt") >>> candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=10, return_tensors="pt") >>> outputs = model( ... **inputs, ... candidate_input_ids=candidates_inputs.input_ids, ... candidate_attention_mask=candidates_inputs.attention_mask, ... candidate_token_type_ids=candidates_inputs.token_type_ids, ... ) >>> relevance_score = outputs.relevance_score ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError("You have to specify either input_ids or input_embeds.") if candidate_input_ids is None and candidate_inputs_embeds is None: raise ValueError("You have to specify either candidate_input_ids or candidate_inputs_embeds.") query_outputs = self.query_embedder( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # [batch_size * num_candidates, candidate_seq_len] (flattened_input_ids, flattened_attention_mask, flattened_token_type_ids) = self._flatten_inputs( candidate_input_ids, candidate_attention_mask, candidate_token_type_ids ) candidate_outputs = self.embedder( flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=candidate_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # [batch_size, retriever_proj_size] query_score = query_outputs[0] # [batch_size * num_candidates, retriever_proj_size] candidate_score = candidate_outputs[0] # [batch_size, num_candidates, retriever_proj_size] candidate_score = candidate_score.view(-1, self.config.num_candidates, self.config.retriever_proj_size) # [batch_size, num_candidates] relevance_score = torch.einsum("bd,bnd->bn", query_score, candidate_score) if not return_dict: return relevance_score, query_score, candidate_score return RealmScorerOutput( relevance_score=relevance_score, query_score=query_score, candidate_score=candidate_score ) @add_start_docstrings( "The knowledge-augmented encoder of REALM outputting masked language model logits and marginal log-likelihood" " loss.", REALM_START_DOCSTRING, ) class RealmKnowledgeAugEncoder(RealmPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder"] def __init__(self, config): 
super().__init__(config) self.realm = RealmBertModel(self.config) self.cls = RealmOnlyMLMHead(self.config) self.post_init() def get_input_embeddings(self): return self.realm.embeddings.word_embeddings def set_input_embeddings(self, value): self.realm.embeddings.word_embeddings = value def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward( REALM_INPUTS_DOCSTRING.format("batch_size, num_candidates, sequence_length") ) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, relevance_score: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, mlm_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" relevance_score (`torch.FloatTensor` of shape `(batch_size, num_candidates)`, *optional*): Relevance score derived from RealmScorer, must be specified if you want to compute the masked language modeling loss. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` mlm_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid calculating joint loss on certain positions. If not specified, the loss will not be masked. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, RealmKnowledgeAugEncoder >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> model = RealmKnowledgeAugEncoder.from_pretrained( ... "google/realm-cc-news-pretrained-encoder", num_candidates=2 ... 
) >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> inputs = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict (flattened_input_ids, flattened_attention_mask, flattened_token_type_ids) = self._flatten_inputs( input_ids, attention_mask, token_type_ids ) joint_outputs = self.realm( flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # [batch_size * num_candidates, joint_seq_len, hidden_size] joint_output = joint_outputs[0] # [batch_size * num_candidates, joint_seq_len, vocab_size] prediction_scores = self.cls(joint_output) # [batch_size, num_candidates] candidate_score = relevance_score masked_lm_loss = None if labels is not None: if candidate_score is None: raise ValueError( "You have to specify `relevance_score` when `labels` is specified in order to compute loss." ) batch_size, seq_length = labels.size() if mlm_mask is None: mlm_mask = torch.ones_like(labels, dtype=torch.float32) else: mlm_mask = mlm_mask.type(torch.float32) # Compute marginal log-likelihood loss_fct = CrossEntropyLoss(reduction="none") # -100 index = padding token # [batch_size * num_candidates * joint_seq_len, vocab_size] mlm_logits = prediction_scores.view(-1, self.config.vocab_size) # [batch_size * num_candidates * joint_seq_len] mlm_targets = labels.tile(1, self.config.num_candidates).view(-1) # [batch_size, num_candidates, joint_seq_len] masked_lm_log_prob = -loss_fct(mlm_logits, mlm_targets).view( batch_size, self.config.num_candidates, seq_length ) # [batch_size, num_candidates, 1] candidate_log_prob = candidate_score.log_softmax(-1).unsqueeze(-1) # [batch_size, num_candidates, joint_seq_len] joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob # [batch_size, joint_seq_len] marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1) # [] masked_lm_loss = -torch.nansum(torch.sum(marginal_gold_log_probs * mlm_mask) / torch.sum(mlm_mask)) if not return_dict: output = (prediction_scores,) + joint_outputs[2:4] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=joint_outputs.hidden_states, attentions=joint_outputs.attentions, ) @add_start_docstrings("The reader of REALM.", REALM_START_DOCSTRING) class RealmReader(RealmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.realm = RealmBertModel(config) self.cls = RealmOnlyMLMHead(config) self.qa_outputs = RealmReaderProjection(config) self.post_init() @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format("reader_beam_size, sequence_length")) @replace_return_docstrings(output_type=RealmReaderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, relevance_score: Optional[torch.FloatTensor] = None, 
block_mask: Optional[torch.BoolTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, has_answers: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, RealmReaderOutput]: r""" relevance_score (`torch.FloatTensor` of shape `(searcher_beam_size,)`, *optional*): Relevance score, which must be specified if you want to compute the logits and marginal log loss. block_mask (`torch.BoolTensor` of shape `(searcher_beam_size, sequence_length)`, *optional*): The mask of the evidence block, which must be specified if you want to compute the logits and marginal log loss. start_positions (`torch.LongTensor` of shape `(searcher_beam_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(searcher_beam_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. has_answers (`torch.BoolTensor` of shape `(searcher_beam_size,)`, *optional*): Whether or not the evidence block has answer(s). Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if relevance_score is None: raise ValueError("You have to specify `relevance_score` to calculate logits and loss.") if block_mask is None: raise ValueError("You have to specify `block_mask` to separate question block and evidence block.") if token_type_ids.size(1) < self.config.max_span_width: raise ValueError("The input sequence length must be greater than or equal to config.max_span_width.") outputs = self.realm( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # [reader_beam_size, joint_seq_len, hidden_size] sequence_output = outputs[0] # [reader_beam_size, num_candidates], [num_candidates], [num_candidates] reader_logits, candidate_starts, candidate_ends = self.qa_outputs( sequence_output, block_mask[0 : self.config.reader_beam_size] ) # [searcher_beam_size, 1] retriever_logits = torch.unsqueeze(relevance_score[0 : self.config.reader_beam_size], -1) # [reader_beam_size, num_candidates] reader_logits += retriever_logits # [] predicted_block_index = torch.argmax(torch.max(reader_logits, dim=1).values) # [] predicted_candidate = torch.argmax(torch.max(reader_logits, dim=0).values) # [1] predicted_start = torch.index_select(candidate_starts, dim=0, index=predicted_candidate) # [1] predicted_end = torch.index_select(candidate_ends, dim=0, index=predicted_candidate) total_loss = None retriever_loss = None reader_loss = None retriever_correct = None reader_correct = None if start_positions is not None and end_positions is not None and has_answers is not None: def compute_correct_candidates(candidate_starts, candidate_ends, gold_starts, gold_ends): """Compute correct span.""" # [reader_beam_size, num_answers, num_candidates] is_gold_start = torch.eq( 
torch.unsqueeze(torch.unsqueeze(candidate_starts, 0), 0), torch.unsqueeze(gold_starts, -1) ) is_gold_end = torch.eq( torch.unsqueeze(torch.unsqueeze(candidate_ends, 0), 0), torch.unsqueeze(gold_ends, -1) ) # [reader_beam_size, num_candidates] return torch.any(torch.logical_and(is_gold_start, is_gold_end), 1) def marginal_log_loss(logits, is_correct): """Loss based on the negative marginal log-likelihood.""" def mask_to_score(mask, dtype=torch.float32): return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min # [] log_numerator = torch.logsumexp(logits + mask_to_score(is_correct, dtype=logits.dtype), dim=-1) log_denominator = torch.logsumexp(logits, dim=-1) return log_denominator - log_numerator # sometimes the start/end positions are outside our model inputs, we ignore these terms # `-1` is reserved for no answer. ignored_index = sequence_output.size(1) start_positions = start_positions.clamp(-1, ignored_index) end_positions = end_positions.clamp(-1, ignored_index) retriever_correct = has_answers any_retriever_correct = torch.any(retriever_correct) reader_correct = compute_correct_candidates( candidate_starts=candidate_starts, candidate_ends=candidate_ends, gold_starts=start_positions[0 : self.config.reader_beam_size], gold_ends=end_positions[0 : self.config.reader_beam_size], ) any_reader_correct = torch.any(reader_correct) retriever_loss = marginal_log_loss(relevance_score, retriever_correct) reader_loss = marginal_log_loss(reader_logits.view(-1), reader_correct.view(-1)) retriever_loss *= any_retriever_correct.type(torch.float32) reader_loss *= any_reader_correct.type(torch.float32) total_loss = (retriever_loss + reader_loss).mean() if not return_dict: output = (predicted_block_index, predicted_candidate, predicted_start, predicted_end) + outputs[2:] return ( ((total_loss, retriever_loss, reader_loss, retriever_correct, reader_correct) + output) if total_loss is not None else output ) return RealmReaderOutput( loss=total_loss, retriever_loss=retriever_loss, reader_loss=reader_loss, retriever_correct=retriever_correct, reader_correct=reader_correct, block_idx=predicted_block_index, candidate=predicted_candidate, start_pos=predicted_start, end_pos=predicted_end, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) REALM_FOR_OPEN_QA_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token (should not be used in this model by design). [What are token type IDs?](../glossary#token-type-ids) answer_ids (`list` of shape `(num_answers, answer_length)`, *optional*): Answer ids for computing the marginal log-likelihood loss. 
Indices should be in `[-1, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-1` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "`RealmForOpenQA` for end-to-end open domain question answering.", REALM_START_DOCSTRING, ) class RealmForOpenQA(RealmPreTrainedModel): def __init__(self, config, retriever=None): super().__init__(config) self.embedder = RealmEmbedder(config) self.reader = RealmReader(config) self.register_buffer( "block_emb", torch.zeros(()).new_empty( size=(config.num_block_records, config.retriever_proj_size), dtype=torch.float32, device=torch.device("cpu"), ), ) self.retriever = retriever self.post_init() @property def searcher_beam_size(self): if self.training: return self.config.searcher_beam_size return self.config.reader_beam_size def block_embedding_to(self, device): """Send `self.block_emb` to a specific device. Args: device (`str` or `torch.device`): The device to which `self.block_emb` will be sent. """ self.block_emb = self.block_emb.to(device) @add_start_docstrings_to_model_forward(REALM_FOR_OPEN_QA_DOCSTRING.format("1, sequence_length")) @replace_return_docstrings(output_type=RealmForOpenQAOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor], attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, answer_ids: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, RealmForOpenQAOutput]: r""" Returns: Example: ```python >>> import torch >>> from transformers import RealmForOpenQA, RealmRetriever, AutoTokenizer >>> retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa") >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-orqa-nq-openqa") >>> model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa", retriever=retriever) >>> question = "Who is the pioneer in modern computer science?" >>> question_ids = tokenizer([question], return_tensors="pt") >>> answer_ids = tokenizer( ... ["alan mathison turing"], ... add_special_tokens=False, ... return_token_type_ids=False, ... return_attention_mask=False, ... ).input_ids >>> reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=False) >>> predicted_answer = tokenizer.decode(predicted_answer_ids) >>> loss = reader_output.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and input_ids.shape[0] != 1: raise ValueError("The batch_size of the inputs must be 1.") question_outputs = self.embedder( input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, return_dict=True ) # [1, projection_size] question_projection = question_outputs[0] # CPU computation starts. # [1, block_emb_size] batch_scores = torch.einsum("BD,QD->QB", self.block_emb, question_projection.to(self.block_emb.device)) # [1, searcher_beam_size] _, retrieved_block_ids = torch.topk(batch_scores, k=self.searcher_beam_size, dim=-1) # [searcher_beam_size] retrieved_block_ids = retrieved_block_ids.squeeze() # [searcher_beam_size, projection_size] retrieved_block_emb = torch.index_select(self.block_emb, dim=0, index=retrieved_block_ids) # CPU computation ends. 
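        # At this point retrieved_block_emb holds the `searcher_beam_size` highest-scoring document
        # embeddings from the inner-product search over self.block_emb above; the retriever call below maps
        # the corresponding block ids back to the evidence blocks and builds the concatenated
        # question + block inputs (plus answer span labels when answer_ids are given) for the reader.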
# Retrieve possible answers has_answers, start_pos, end_pos, concat_inputs = self.retriever( retrieved_block_ids.cpu(), input_ids, answer_ids, max_length=self.config.reader_seq_len ) concat_inputs = concat_inputs.to(self.reader.device) block_mask = concat_inputs.special_tokens_mask.type(torch.bool).to(device=self.reader.device) block_mask.logical_not_().logical_and_(concat_inputs.token_type_ids.type(torch.bool)) if has_answers is not None: has_answers = torch.tensor(has_answers, dtype=torch.bool, device=self.reader.device) start_pos = torch.tensor(start_pos, dtype=torch.long, device=self.reader.device) end_pos = torch.tensor(end_pos, dtype=torch.long, device=self.reader.device) # [searcher_beam_size] retrieved_logits = torch.einsum( "D,BD->B", question_projection.squeeze(), retrieved_block_emb.to(self.reader.device) ) reader_output = self.reader( input_ids=concat_inputs.input_ids[0 : self.config.reader_beam_size], attention_mask=concat_inputs.attention_mask[0 : self.config.reader_beam_size], token_type_ids=concat_inputs.token_type_ids[0 : self.config.reader_beam_size], relevance_score=retrieved_logits, block_mask=block_mask, has_answers=has_answers, start_positions=start_pos, end_positions=end_pos, return_dict=True, ) predicted_block = concat_inputs.input_ids[reader_output.block_idx] predicted_answer_ids = predicted_block[reader_output.start_pos : reader_output.end_pos + 1] if not return_dict: return reader_output, predicted_answer_ids return RealmForOpenQAOutput( reader_output=reader_output, predicted_answer_ids=predicted_answer_ids, )
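# --- Illustrative sketch (not part of the original module) ---
# A minimal demonstration of `block_embedding_to`, which relocates the evidence
# block embeddings used by the dot-product search above. The checkpoint name
# matches the docstring example; GPU availability is an assumption here.
if __name__ == "__main__":
    import torch

    from transformers import RealmForOpenQA, RealmRetriever

    retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa")
    model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa", retriever=retriever)

    # `block_emb` is registered on CPU at construction time; sending it to an
    # accelerator also moves the question-block scoring, since that einsum runs
    # on `block_emb.device`.
    if torch.cuda.is_available():
        model.block_embedding_to("cuda:0")

    print(model.block_emb.device, tuple(model.block_emb.shape))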
transformers/src/transformers/models/realm/modeling_realm.py/0
{ "file_path": "transformers/src/transformers/models/realm/modeling_realm.py", "repo_id": "transformers", "token_count": 36138 }
357
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow RegNet model.""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACT2FN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import ( TFPreTrainedModel, TFSequenceClassificationLoss, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "RegNetConfig" # Base docstring _CHECKPOINT_FOR_DOC = "facebook/regnet-y-040" _EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class TFRegNetConvLayer(keras.layers.Layer): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs, ): super().__init__(**kwargs) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb self.padding = keras.layers.ZeroPadding2D(padding=kernel_size // 2) self.convolution = keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", ) self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.activation = ACT2FN[activation] if activation is not None else tf.identity self.in_channels = in_channels self.out_channels = out_channels def call(self, hidden_state): hidden_state = self.convolution(self.padding(hidden_state)) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFRegNetEmbeddings(keras.layers.Layer): """ RegNet Embeddings (stem) composed of a single aggressive convolution. 
""" def __init__(self, config: RegNetConfig, **kwargs): super().__init__(**kwargs) self.num_channels = config.num_channels self.embedder = TFRegNetConvLayer( in_channels=config.num_channels, out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", ) def call(self, pixel_values): num_channels = shape_list(pixel_values)[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) hidden_state = self.embedder(pixel_values) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) class TFRegNetShortCut(keras.layers.Layer): """ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs): super().__init__(**kwargs) self.convolution = keras.layers.Conv2D( filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" ) self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.in_channels = in_channels self.out_channels = out_channels def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: return self.normalization(self.convolution(inputs), training=training) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFRegNetSELayer(keras.layers.Layer): """ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). 
""" def __init__(self, in_channels: int, reduced_channels: int, **kwargs): super().__init__(**kwargs) self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler") self.attention = [ keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"), keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"), ] self.in_channels = in_channels self.reduced_channels = reduced_channels def call(self, hidden_state): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] pooled = self.pooler(hidden_state) for layer_module in self.attention: pooled = layer_module(pooled) hidden_state = hidden_state * pooled return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build((None, None, None, None)) if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention[0].name): self.attention[0].build([None, None, None, self.in_channels]) with tf.name_scope(self.attention[1].name): self.attention[1].build([None, None, None, self.reduced_channels]) class TFRegNetXLayer(keras.layers.Layer): """ RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. """ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs): super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) # `self.layers` instead of `self.layer` because that is a reserved argument. self.layers = [ TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"), TFRegNetConvLayer( out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1" ), TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.2"), ] self.activation = ACT2FN[config.hidden_act] def call(self, hidden_state): residual = hidden_state for layer_module in self.layers: hidden_state = layer_module(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFRegNetYLayer(keras.layers.Layer): """ RegNet's Y layer: an X layer with Squeeze and Excitation. 
""" def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs): super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 groups = max(1, out_channels // config.groups_width) self.shortcut = ( TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) self.layers = [ TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"), TFRegNetConvLayer( out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1" ), TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"), TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.3"), ] self.activation = ACT2FN[config.hidden_act] def call(self, hidden_state): residual = hidden_state for layer_module in self.layers: hidden_state = layer_module(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFRegNetStage(keras.layers.Layer): """ A RegNet stage composed by stacked layers. """ def __init__( self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs ): super().__init__(**kwargs) layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer self.layers = [ # downsampling is done in the first layer with stride of 2 layer(config, in_channels, out_channels, stride=stride, name="layers.0"), *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)], ] def call(self, hidden_state): for layer_module in self.layers: hidden_state = layer_module(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFRegNetEncoder(keras.layers.Layer): def __init__(self, config: RegNetConfig, **kwargs): super().__init__(**kwargs) self.stages = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) ) in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])): self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")) def call( self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> TFBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, 
hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) def build(self, input_shape=None): if self.built: return self.built = True for stage in self.stages: with tf.name_scope(stage.name): stage.build(None) @keras_serializable class TFRegNetMainLayer(keras.layers.Layer): config_class = RegNetConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embedder = TFRegNetEmbeddings(config, name="embedder") self.encoder = TFRegNetEncoder(config, name="encoder") self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler") @unpack_inputs def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> TFBaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) # Change to NCHW output format have uniformity in the modules pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2)) last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2)) # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build((None, None, None, None)) class TFRegNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RegNetConfig base_model_prefix = "regnet" main_input_name = "pixel_values" @property def input_signature(self): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)} REGNET_START_DOCSTRING = r""" This model is a Tensorflow [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" REGNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", REGNET_START_DOCSTRING, ) class TFRegNetModel(TFRegNetPreTrainedModel): def __init__(self, config: RegNetConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.regnet = TFRegNetMainLayer(config, name="regnet") @unpack_inputs @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "regnet", None) is not None: with tf.name_scope(self.regnet.name): self.regnet.build(None) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, REGNET_START_DOCSTRING, ) class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: RegNetConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.regnet = TFRegNetMainLayer(config, name="regnet") # classification head self.classifier = [ keras.layers.Flatten(), keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) pooled_output = outputs.pooler_output if return_dict else outputs[1] flattened_output = self.classifier[0](pooled_output) logits = self.classifier[1](flattened_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "regnet", None) is not None: with tf.name_scope(self.regnet.name): self.regnet.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier[1].name): self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]])
transformers/src/transformers/models/regnet/modeling_tf_regnet.py/0
{ "file_path": "transformers/src/transformers/models/regnet/modeling_tf_regnet.py", "repo_id": "transformers", "token_count": 10486 }
358
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RoBERTa checkpoint.""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_TEXT = "Hello world! cécé herlolip" def convert_roberta_checkpoint_to_pytorch( roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool ): """ Copy/paste/tweak roberta's weights to our BERT structure. """ roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path) roberta.eval() # disable dropout roberta_sent_encoder = roberta.model.encoder.sentence_encoder config = RobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.args.encoder_embed_dim, num_hidden_layers=roberta.args.encoder_layers, num_attention_heads=roberta.args.encoder_attention_heads, intermediate_size=roberta.args.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, # PyTorch default used in fairseq ) if classification_head: config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our BERT config:", config) model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config) model.eval() # Now let's copy all the weights. # Embeddings model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
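    # Note on shapes: fairseq's learned position embeddings reserve the first two
    # indices for the padding offset, which is why `max_position_embeddings` is set
    # to 514 above for a 512-token model. The LayerNorm copied next maps fairseq's
    # `emb_layer_norm` onto our `embeddings.LayerNorm`.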
model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias for i in range(config.num_hidden_layers): # Encoder: start of layer layer: BertLayer = model.roberta.encoder.layer[i] roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] # self attention self_attn: BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ) self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias # self-attention output self_output: BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape self_output.dense.weight = roberta_layer.self_attn.out_proj.weight self_output.dense.bias = roberta_layer.self_attn.out_proj.bias self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias # intermediate intermediate: BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape intermediate.dense.weight = roberta_layer.fc1.weight intermediate.dense.bias = roberta_layer.fc1.bias # output bert_output: BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape bert_output.dense.weight = roberta_layer.fc2.weight bert_output.dense.bias = roberta_layer.fc2.bias bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias # end of layer if classification_head: model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias else: # LM Head model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
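    # The sanity check below encodes SAMPLE_TEXT with the fairseq model's own
    # tokenizer, runs both models on the same ids, and compares the outputs
    # elementwise before anything is saved.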
input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1 our_output = model(input_ids)[0] if classification_head: their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids)) else: their_output = roberta.model(input_ids)[0] print(our_output.shape, their_output.shape) max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 success = torch.allclose(our_output, their_output, atol=1e-3) print("Do both models output the same tensors?", "🔥" if success else "💩") if not success: raise Exception("Something went wRoNg") pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) args = parser.parse_args() convert_roberta_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
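# --- Illustrative invocation sketch (not part of the original script) ---
# Only the flag names come from the argparse definitions above; the paths are
# placeholders.
#
#   python convert_roberta_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/roberta.base \
#       --pytorch_dump_folder_path /path/to/hf_roberta_out \
#       --classification_head   # only for checkpoints that carry an "mnli" head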
transformers/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3215 }
359
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_roformer"] = [ "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_roformer"] = [ "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_roformer"] = [ "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( 
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
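# --- Illustrative note (not part of the original file) ---
# Because of the `_LazyModule` indirection above, framework-specific submodules are
# imported only when an attribute is first accessed. The class names below are the
# real exports declared in `_import_structure`; the access order is just an example:
#
#   from transformers.models import roformer
#   cfg_cls = roformer.RoFormerConfig      # light, config-only import
#   model_cls = roformer.RoFormerModel     # triggers the torch-backed `modeling_roformer` import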
transformers/src/transformers/models/roformer/__init__.py/0
{ "file_path": "transformers/src/transformers/models/roformer/__init__.py", "repo_id": "transformers", "token_count": 2239 }
360
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for SAM.""" import math from copy import deepcopy from itertools import product from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import ( TensorType, is_tf_available, is_torch_available, is_torchvision_available, logging, requires_backends, ) if is_torch_available(): import torch import torch.nn.functional as F if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_tf_available(): import tensorflow as tf from tensorflow.experimental import numpy as tnp from ...tf_utils import flatten, shape_list logger = logging.get_logger(__name__) class SamImageProcessor(BaseImageProcessor): r""" Constructs a SAM image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`): Size of the output image after resizing. Resizes the longest edge of the image to match `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in the `preprocess` method. mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`): Size of the output segmentation map after resizing. Resizes the longest edge of the image to match `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Wwhether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. 
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the `preprocess` method. pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`): Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess` method. mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`): Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, mask_size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: bool = True, pad_size: int = None, mask_pad_size: int = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"longest_edge": 1024} size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024} pad_size = get_size_dict(pad_size, default_to_square=True) mask_size = mask_size if mask_size is not None else {"longest_edge": 256} mask_size = ( get_size_dict(max_size=mask_size, default_to_square=False) if not isinstance(mask_size, dict) else mask_size ) mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256} mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) self.do_resize = do_resize self.size = size self.mask_size = mask_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size self.mask_pad_size = mask_pad_size self.do_convert_rgb = do_convert_rgb def pad_image( self, image: np.ndarray, pad_size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom. Args: image (`np.ndarray`): Image to pad. pad_size (`Dict[str, int]`): Size of the output image after padding. 
data_format (`str` or `ChannelDimension`, *optional*): The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the `data_format` of the `image` will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ output_height, output_width = pad_size["height"], pad_size["width"] input_height, input_width = get_image_size(image, channel_dim=input_data_format) pad_width = output_width - input_width pad_height = output_height - input_height padded_image = pad( image, ((0, pad_height), (0, pad_width)), data_format=data_format, input_data_format=input_data_format, **kwargs, ) return padded_image def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int): """ Compute the output size given input size and target long side length. """ oldh, oldw = old_shape scale = longest_edge * 1.0 / max(oldh, oldw) newh, neww = oldh * scale, oldw * scale newh = int(newh + 0.5) neww = int(neww + 0.5) return (newh, neww) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest edge of the image will be resized to the specified size, while the other edge will be resized to maintain the aspect ratio. resample: `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "longest_edge" not in size: raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. 
Got {size.keys()}") input_size = get_image_size(image, channel_dim=input_data_format) output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"]) return resize( image, size=(output_height, output_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def _preprocess( self, image: ImageInput, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, rescale_factor: Optional[float] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) reshaped_input_size = get_image_size(image, channel_dim=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) if do_pad: image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format) return image, reshaped_input_size def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]: image = to_numpy_array(image) # PIL RGBA images are converted to RGB if do_convert_rgb: image = convert_to_rgb(image) # All transformations expect numpy arrays. image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) original_size = get_image_size(image, channel_dim=input_data_format) image, reshaped_input_size = self._preprocess( image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, pad_size=pad_size, input_data_format=input_data_format, ) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image, original_size, reshaped_input_size def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: Optional[bool] = None, mask_size: Dict[str, int] = None, do_pad: Optional[bool] = None, mask_pad_size: Optional[Dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] input_data_format = ChannelDimension.FIRST else: added_channel_dim = False if input_data_format is None: input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) original_size = get_image_size(segmentation_map, channel_dim=input_data_format) segmentation_map, _ = self._preprocess( image=segmentation_map, do_resize=do_resize, size=mask_size, resample=PILImageResampling.NEAREST, do_rescale=False, do_normalize=False, do_pad=do_pad, pad_size=mask_pad_size, input_data_format=input_data_format, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) segmentation_map = segmentation_map.astype(np.int64) return segmentation_map, original_size def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, mask_size: Optional[Dict[str, int]] = None, resample: Optional["PILImageResampling"] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, mask_pad_size: Optional[Dict[str, int]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. segmentation_maps (`ImageInput`, *optional*): Segmentation map to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The longest edge of the image is resized to `size["longest_edge"]` whilst preserving the aspect ratio. mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`): Controls the size of the segmentation map after `resize`. 
The longest edge of the image is resized to `size["longest_edge"]` whilst preserving the aspect ratio. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values by rescaling factor. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to apply to the image pixel values. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`): Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and `pad_size["width"]` if `do_pad` is set to `True`. mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`): Controls the size of the padding applied to the segmentation map. The image is padded to `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size mask_size = mask_size if mask_size is not None else self.mask_size mask_size = ( get_size_dict(max_size=mask_size, default_to_square=False) if not isinstance(mask_size, dict) else mask_size ) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad pad_size = pad_size if pad_size is not None else self.pad_size pad_size = get_size_dict(pad_size, default_to_square=True) mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and (size is None or resample is None): raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if do_pad and pad_size is None: raise ValueError("Pad size must be specified if do_pad is True.") images, original_sizes, reshaped_input_sizes = zip( *( self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, pad_size=pad_size, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format, ) for img in images ) ) data = { "pixel_values": images, "original_sizes": original_sizes, "reshaped_input_sizes": reshaped_input_sizes, } if segmentation_maps is not None: segmentation_maps, original_mask_sizes = zip( *( self._preprocess_mask( segmentation_map=mask, do_resize=do_resize, mask_size=mask_size, do_pad=do_pad, mask_pad_size=mask_pad_size, input_data_format=input_data_format, ) for mask in segmentation_maps ) ) # masks should start out the same size as input images assert all( original_im_size == original_mask_size for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes) ), "Segmentation maps should be the same size as input images." 
data["labels"] = segmentation_maps return BatchFeature(data=data, tensor_type=return_tensors) def post_process_masks( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None, return_tensors="pt", ): """ Remove padding and upscale masks to the original image size. Args: masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`): The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`): The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. return_tensors (`str`, *optional*, defaults to `"pt"`): If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors. Returns: (`Union[torch.Tensor, tf.Tensor]`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. """ if return_tensors == "pt": return self._post_process_masks_pt( masks=masks, original_sizes=original_sizes, reshaped_input_sizes=reshaped_input_sizes, mask_threshold=mask_threshold, binarize=binarize, pad_size=pad_size, ) elif return_tensors == "tf": return self._post_process_masks_tf( masks=masks, original_sizes=original_sizes, reshaped_input_sizes=reshaped_input_sizes, mask_threshold=mask_threshold, binarize=binarize, pad_size=pad_size, ) else: raise ValueError("return_tensors must be either 'pt' or 'tf'") def _post_process_masks_pt( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None ): """ Remove padding and upscale masks to the original image size. Args: masks (`Union[List[torch.Tensor], List[np.ndarray]]`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. Returns: (`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. 
""" requires_backends(self, ["torch"]) pad_size = self.pad_size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) if isinstance(original_sizes, (torch.Tensor, np.ndarray)): original_sizes = original_sizes.tolist() if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)): reshaped_input_sizes = reshaped_input_sizes.tolist() output_masks = [] for i, original_size in enumerate(original_sizes): if isinstance(masks[i], np.ndarray): masks[i] = torch.from_numpy(masks[i]) elif not isinstance(masks[i], torch.Tensor): raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`") interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False) interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]] interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False) if binarize: interpolated_mask = interpolated_mask > mask_threshold output_masks.append(interpolated_mask) return output_masks def _post_process_masks_tf( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None ): """ Remove padding and upscale masks to the original image size. Args: masks (`tf.Tensor`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`tf.Tensor`): The original size of the images before resizing for input to the model, in (height, width) format. reshaped_input_sizes (`tf.Tensor`): The size of the image input to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. Returns: (`tf.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. """ requires_backends(self, ["tf"]) pad_size = self.pad_size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) output_masks = [] for i, original_size in enumerate(original_sizes): # tf.image expects NHWC, we transpose the NCHW inputs for it mask = tf.transpose(masks[i], perm=[0, 2, 3, 1]) interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear") interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :] interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear") if binarize: interpolated_mask = interpolated_mask > mask_threshold # And then we transpose them back at the end output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2])) return output_masks def post_process_for_mask_generation( self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt" ): """ Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks. 
Args: all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all predicted segmentation masks all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all predicted iou scores all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all bounding boxes of the predicted masks crops_nms_thresh (`float`): Threshold for NMS (Non Maximum Suppression) algorithm. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ if return_tensors == "pt": return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh) elif return_tensors == "tf": return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh) def generate_crop_boxes( self, image, target_size, crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[List[int]] = 1, device: Optional["torch.device"] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_tensors: str = "pt", ): """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (`np.array`): Input original image target_size (`int`): Target size of the resized image crop_n_layers (`int`, *optional*, defaults to 0): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`float`, *optional*, defaults to 512/1500): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*, defaults to 32): Number of points to sample from each crop. crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. device (`torch.device`, *optional*, defaults to None): Device to use for the computation. If None, cpu will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. 
""" crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes( image, target_size, crop_n_layers, overlap_ratio, points_per_crop, crop_n_points_downscale_factor, input_data_format, ) if return_tensors == "pt": if device is None: device = torch.device("cpu") crop_boxes = torch.tensor(crop_boxes, device=device) points_per_crop = torch.tensor(points_per_crop, device=device) # cropped_images stays as np input_labels = torch.tensor(input_labels, device=device) elif return_tensors == "tf": if device is not None: raise ValueError("device is not a supported argument when return_tensors is tf!") crop_boxes = tf.convert_to_tensor(crop_boxes) points_per_crop = tf.convert_to_tensor(points_per_crop) # cropped_images stays as np input_labels = tf.convert_to_tensor(input_labels) else: raise ValueError("return_tensors must be either 'pt' or 'tf'.") return crop_boxes, points_per_crop, cropped_images, input_labels def filter_masks( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, return_tensors="pt", ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`Union[torch.Tensor, tf.Tensor]`): Input masks. iou_scores (`Union[torch.Tensor, tf.Tensor]`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ if return_tensors == "pt": return self._filter_masks_pt( masks=masks, iou_scores=iou_scores, original_size=original_size, cropped_box_image=cropped_box_image, pred_iou_thresh=pred_iou_thresh, stability_score_thresh=stability_score_thresh, mask_threshold=mask_threshold, stability_score_offset=stability_score_offset, ) elif return_tensors == "tf": return self._filter_masks_tf( masks=masks, iou_scores=iou_scores, original_size=original_size, cropped_box_image=cropped_box_image, pred_iou_thresh=pred_iou_thresh, stability_score_thresh=stability_score_thresh, mask_threshold=mask_threshold, stability_score_offset=stability_score_offset, ) def _filter_masks_pt( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`torch.Tensor`): Input masks. 
iou_scores (`torch.Tensor`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. """ requires_backends(self, ["torch"]) original_height, original_width = original_size iou_scores = iou_scores.flatten(0, 1) masks = masks.flatten(0, 1) if masks.shape[0] != iou_scores.shape[0]: raise ValueError("masks and iou_scores must have the same batch size.") if masks.device != iou_scores.device: iou_scores = iou_scores.to(masks.device) batch_size = masks.shape[0] keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device) if pred_iou_thresh > 0.0: keep_mask = keep_mask & (iou_scores > pred_iou_thresh) # compute stability score if stability_score_thresh > 0.0: stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset) keep_mask = keep_mask & (stability_scores > stability_score_thresh) scores = iou_scores[keep_mask] masks = masks[keep_mask] # binarize masks masks = masks > mask_threshold converted_boxes = _batched_mask_to_box(masks) keep_mask = ~_is_box_near_crop_edge( converted_boxes, cropped_box_image, [0, 0, original_width, original_height] ) scores = scores[keep_mask] masks = masks[keep_mask] converted_boxes = converted_boxes[keep_mask] masks = _pad_masks(masks, cropped_box_image, original_height, original_width) # conversion to rle is necessary to run non-maximum suppresion masks = _mask_to_rle_pytorch(masks) return masks, scores, converted_boxes def _filter_masks_tf( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`tf.Tensor`): Input masks. iou_scores (`tf.Tensor`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. 
""" requires_backends(self, ["tf"]) original_height, original_width = original_size iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]]) masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]]) if masks.shape[0] != iou_scores.shape[0]: raise ValueError("masks and iou_scores must have the same batch size.") batch_size = masks.shape[0] keep_mask = tf.ones(batch_size, dtype=tf.bool) if pred_iou_thresh > 0.0: keep_mask = keep_mask & (iou_scores > pred_iou_thresh) # compute stability score if stability_score_thresh > 0.0: stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset) keep_mask = keep_mask & (stability_scores > stability_score_thresh) scores = iou_scores[keep_mask] masks = masks[keep_mask] # binarize masks masks = masks > mask_threshold converted_boxes = _batched_mask_to_box_tf(masks) keep_mask = ~_is_box_near_crop_edge_tf( converted_boxes, cropped_box_image, [0, 0, original_width, original_height] ) scores = scores[keep_mask] masks = masks[keep_mask] converted_boxes = converted_boxes[keep_mask] masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width) # conversion to rle is necessary to run non-maximum suppresion masks = _mask_to_rle_tf(masks) return masks, scores, converted_boxes def _compute_stability_score_pt(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int): # One mask is always contained inside the other. # Save memory by preventing unnecesary cast to torch.int64 intersections = ( (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) ) unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) stability_scores = intersections / unions return stability_scores def _compute_stability_score_tf(masks: "tf.Tensor", mask_threshold: float, stability_score_offset: int): # Torch does Py3-style division but TF does floor division with ints. We cast to float32 in TF to make sure # we get the right division results. intersections = tf.count_nonzero( masks > (mask_threshold + stability_score_offset), axis=[-1, -2], dtype=tf.float32 ) unions = tf.count_nonzero(masks > (mask_threshold - stability_score_offset), axis=[-1, -2], dtype=tf.float32) stability_scores = intersections / unions return stability_scores def _build_point_grid(n_per_side: int) -> np.ndarray: """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" offset = 1 / (2 * n_per_side) points_one_side = np.linspace(offset, 1 - offset, n_per_side) points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) points_y = np.tile(points_one_side[:, None], (1, n_per_side)) points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) return points def _normalize_coordinates( target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False ) -> np.ndarray: """ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width) format. 
""" old_height, old_width = original_size scale = target_size * 1.0 / max(old_height, old_width) new_height, new_width = old_height * scale, old_width * scale new_width = int(new_width + 0.5) new_height = int(new_height + 0.5) coords = deepcopy(coords).astype(float) if is_bounding_box: coords = coords.reshape(-1, 2, 2) coords[..., 0] = coords[..., 0] * (new_width / old_width) coords[..., 1] = coords[..., 1] * (new_height / old_height) if is_bounding_box: coords = coords.reshape(-1, 4) return coords def _generate_crop_boxes( image, target_size: int, # Is it tuple here? crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[List[int]] = 1, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[List[List[int]], List[int]]: """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]): Image to generate crops for. target_size (`int`): Size of the smallest crop. crop_n_layers (`int`, *optional*): If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`int`, *optional*): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*): Number of points to sample per crop. crop_n_points_downscale_factor (`int`, *optional*): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if isinstance(image, list): raise ValueError("Only one image is allowed for crop generation.") image = to_numpy_array(image) original_size = get_image_size(image, input_data_format) points_grid = [] for i in range(crop_n_layers + 1): n_points = int(points_per_crop / (crop_n_points_downscale_factor**i)) points_grid.append(_build_point_grid(n_points)) crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size) cropped_images, point_grid_per_crop = _generate_crop_images( crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format ) crop_boxes = np.array(crop_boxes) crop_boxes = crop_boxes.astype(np.float32) points_per_crop = np.array([point_grid_per_crop]) points_per_crop = np.transpose(points_per_crop, axes=(0, 2, 1, 3)) input_labels = np.ones_like(points_per_crop[:, :, :, 0], dtype=np.int64) return crop_boxes, points_per_crop, cropped_images, input_labels def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size): """ Generates 2 ** (layers idx + 1) crops for each crop_n_layers. 
Crops are in the XYWH format : The XYWH format consists of the following required indices: - X: X coordinate of the top left of the bounding box - Y: Y coordinate of the top left of the bounding box - W: width of the bounding box - H: height of the bounding box """ crop_boxes, layer_idxs = [], [] im_height, im_width = original_size short_side = min(im_height, im_width) # Original image crop_boxes.append([0, 0, im_width, im_height]) layer_idxs.append(0) for i_layer in range(crop_n_layers): n_crops_per_side = 2 ** (i_layer + 1) overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side)) crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side)) crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)] crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)] for left, top in product(crop_box_x0, crop_box_y0): box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)] crop_boxes.append(box) layer_idxs.append(i_layer + 1) return crop_boxes, layer_idxs def _generate_crop_images( crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None ): """ Takes as an input bounding boxes that are used to crop the image. Based in the crops, the corresponding points are also passed. """ cropped_images = [] total_points_per_crop = [] for i, crop_box in enumerate(crop_boxes): left, top, right, bottom = crop_box channel_dim = infer_channel_dimension_format(image, input_data_format) if channel_dim == ChannelDimension.LAST: cropped_im = image[top:bottom, left:right, :] else: cropped_im = image[:, top:bottom, left:right] cropped_images.append(cropped_im) cropped_im_size = get_image_size(cropped_im, channel_dim) points_scale = np.array(cropped_im_size)[None, ::-1] points = points_grid[layer_idxs[i]] * points_scale normalized_points = _normalize_coordinates(target_size, points, original_size) total_points_per_crop.append(normalized_points) return cropped_images, total_points_per_crop def _pad_masks(masks, crop_box: List[int], orig_height: int, orig_width: int): left, top, right, bottom = crop_box if left == 0 and top == 0 and right == orig_width and bottom == orig_height: return masks # Coordinate transform masks pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top) pad = (left, pad_x - left, top, pad_y - top) return torch.nn.functional.pad(masks, pad, value=0) def _pad_masks_tf(masks, crop_box: List[int], orig_height: int, orig_width: int): left, top, right, bottom = crop_box if left == 0 and top == 0 and right == orig_width and bottom == orig_height: return masks # Coordinate transform masks pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top) pad = (left, pad_x - left, top, pad_y - top) return tf.pad(masks, pad, constant_values=0) def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0): """Filter masks at the edge of a crop, but not at the edge of the original image.""" crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) left, top, _, _ = crop_box offset = torch.tensor([[left, top, left, top]], device=boxes.device) # Check if boxes has a channel dimension if len(boxes.shape) == 3: offset = offset.unsqueeze(1) boxes = (boxes + offset).float() near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], 
atol=atol, rtol=0) near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) return torch.any(near_crop_edge, dim=1) def _is_box_near_crop_edge_tf(boxes, crop_box, orig_box, atol=20.0): """Filter masks at the edge of a crop, but not at the edge of the original image.""" crop_box_tf = tf.convert_to_tensor(crop_box, dtype=tf.float32) orig_box_tf = tf.convert_to_tensor(orig_box, dtype=tf.float32) left, top, _, _ = crop_box offset = tf.convert_to_tensor([[left, top, left, top]]) # Check if boxes has a channel dimension if len(boxes.shape) == 3: offset = tf.expand_dims(offset, 1) boxes = tf.cast(boxes + offset, tf.float32) near_crop_edge = tnp.isclose(boxes, crop_box_tf[None, :], atol=atol, rtol=0) near_image_edge = tnp.isclose(boxes, orig_box_tf[None, :], atol=atol, rtol=0) near_crop_edge = tf.math.logical_and(near_crop_edge, ~near_image_edge) return tf.reduce_any(near_crop_edge, axis=1) def _batched_mask_to_box(masks: "torch.Tensor"): """ Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which corresponds the following required indices: - LEFT: left hand side of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BOTTOM: bottom of the bounding box Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape is channel_1 x channel_2 x ... x 4. Args: - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`) """ # torch.max below raises an error on empty inputs, just skip in this case if torch.numel(masks) == 0: return torch.zeros(*masks.shape[:-2], 4, device=masks.device) # Normalize shape to Cxheightxwidth shape = masks.shape height, width = shape[-2:] # Get top and bottom edges in_height, _ = torch.max(masks, dim=-1) in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :] bottom_edges, _ = torch.max(in_height_coords, dim=-1) in_height_coords = in_height_coords + height * (~in_height) top_edges, _ = torch.min(in_height_coords, dim=-1) # Get left and right edges in_width, _ = torch.max(masks, dim=-2) in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :] right_edges, _ = torch.max(in_width_coords, dim=-1) in_width_coords = in_width_coords + width * (~in_width) left_edges, _ = torch.min(in_width_coords, dim=-1) # If the mask is empty the right edge will be to the left of the left edge. # Replace these boxes with [0, 0, 0, 0] empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) out = out * (~empty_filter).unsqueeze(-1) # Return to original shape out = out.reshape(*shape[:-2], 4) return out def _batched_mask_to_box_tf(masks: "tf.Tensor"): """ Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which corresponds the following required indices: - LEFT: left hand side of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BOTTOM: bottom of the bounding box Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape is channel_1 x channel_2 x ... x 4. 
    Args:
        - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
    """

    if tf.size(masks) == 0:
        return tf.zeros([*masks.shape[:-2], 4])

    # Normalize shape to Cxheightxwidth
    shape = shape_list(masks)
    height, width = shape[-2:]

    # Work on an integer view of the (boolean) masks so the coordinate arithmetic below is well-defined in TF.
    # Note that tf.reduce_max / tf.reduce_min return values only (no indices), unlike torch.max / torch.min.
    masks = tf.cast(masks, tf.int32)

    # Get top and bottom edges
    in_height = tf.reduce_max(masks, axis=-1)
    in_height_coords = in_height * tf.range(height)[None, :]
    bottom_edges = tf.reduce_max(in_height_coords, axis=-1)
    in_height_coords = in_height_coords + height * (1 - in_height)
    top_edges = tf.reduce_min(in_height_coords, axis=-1)

    # Get left and right edges
    in_width = tf.reduce_max(masks, axis=-2)
    in_width_coords = in_width * tf.range(width)[None, :]
    right_edges = tf.reduce_max(in_width_coords, axis=-1)
    in_width_coords = in_width_coords + width * (1 - in_width)
    left_edges = tf.reduce_min(in_width_coords, axis=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)
    out = out * tf.expand_dims(tf.cast(~empty_filter, out.dtype), -1)

    # Return to original shape
    out = tf.reshape(out, [*shape[:-2], 4])
    return out


def _mask_to_rle_pytorch(input_mask: "torch.Tensor"):
    """
    Encodes masks to the run-length encoding (RLE), in the format expected by pycoco tools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = input_mask.permute(0, 2, 1).flatten(1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = diff.nonzero()

    # Encode run length
    out = []
    for i in range(batch_size):
        cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if input_mask[i, 0] == 0 else [0]
        counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]]
        out.append({"size": [height, width], "counts": counts})
    return out


def _mask_to_rle_tf(input_mask: "tf.Tensor"):
    """
    Encodes masks to the run-length encoding (RLE), in the format expected by pycoco tools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = flatten(tf.transpose(input_mask, perm=(0, 2, 1)), 1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = tf.where(diff)

    # Encode run length
    out = []
    for i in range(batch_size):
        # tf tensors do not support boolean fancy indexing, so gather the change positions explicitly
        cur_idxs = tf.boolean_mask(change_indices[:, 1], change_indices[:, 0] == i) + 1
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if not bool(input_mask[i, 0]) else [0]
        counts += [int(cur_idxs[0])] + btw_idxs.numpy().tolist() + [height * width - int(cur_idxs[-1])]
        out.append({"size": [height, width], "counts": counts})
    return out


def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    """Compute a binary mask from an uncompressed RLE."""
    height, width = rle["size"]
    mask = np.empty(height * width, dtype=bool)
    idx = 0
    parity = False
    for count in rle["counts"]:
        mask[idx : idx + count] = parity
        idx += count
        parity = not parity
    mask = mask.reshape(width, height)
    return mask.transpose()  # Reshape to original shape


def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
            rle_masks (`torch.Tensor`):
                binary masks in the RLE format
            iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
                iou_scores predicted by the model
            mask_boxes (`torch.Tensor`):
                The bounding boxes corresponding to segmentation masks
            amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
                NMS threshold.
""" keep_by_nms = batched_nms( boxes=mask_boxes.float(), scores=iou_scores, idxs=torch.zeros(mask_boxes.shape[0]), iou_threshold=amg_crops_nms_thresh, ) iou_scores = iou_scores[keep_by_nms] rle_masks = [rle_masks[i] for i in keep_by_nms] mask_boxes = mask_boxes[keep_by_nms] masks = [_rle_to_mask(rle) for rle in rle_masks] return masks, iou_scores, rle_masks, mask_boxes def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7): """ Perform NMS (Non Maximum Suppression) on the outputs. Args: rle_masks (`tf.Tensor`): binary masks in the RLE format iou_scores (`tf.Tensor` of shape (nb_masks, 1)): iou_scores predicted by the model mask_boxes (`tf.Tensor`): The bounding boxes corresponding to segmentation masks amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): NMS threshold. """ keep_by_nms = tf.image.combined_non_max_suppression( boxes=mask_boxes.float(), scores=iou_scores, idxs=torch.zeros(mask_boxes.shape[0]), iou_threshold=amg_crops_nms_thresh, ) iou_scores = iou_scores[keep_by_nms] rle_masks = [rle_masks[i] for i in keep_by_nms] mask_boxes = mask_boxes[keep_by_nms] masks = [_rle_to_mask(rle) for rle in rle_masks] return masks, iou_scores, rle_masks, mask_boxes
transformers/src/transformers/models/sam/image_processing_sam.py/0
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Siglip model configuration""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/config.json", } class SiglipTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SiglipTextModel`]. It is used to instantiate a Siglip text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Siglip text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SiglipModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 64): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. pad_token_id (`int`, *optional*, defaults to 1): The id of the padding token in the vocabulary. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the vocabulary. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the vocabulary. 
Example: ```python >>> from transformers import SiglipTextConfig, SiglipTextModel >>> # Initializing a SiglipTextConfig with google/siglip-base-patch16-224 style configuration >>> configuration = SiglipTextConfig() >>> # Initializing a SiglipTextModel (with random weights) from the google/siglip-base-patch16-224 style configuration >>> model = SiglipTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "siglip_text_model" def __init__( self, vocab_size=32000, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, max_position_embeddings=64, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, # This differs from `CLIPTokenizer`'s default and from openai/siglip # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538 pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.attention_dropout = attention_dropout @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the text config dict if we are loading from SiglipConfig if config_dict.get("model_type") == "siglip": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class SiglipVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. 
patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. Example: ```python >>> from transformers import SiglipVisionConfig, SiglipVisionModel >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration >>> configuration = SiglipVisionConfig() >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration >>> model = SiglipVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "siglip_vision_model" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from SiglipConfig if config_dict.get("model_type") == "siglip": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class SiglipConfig(PretrainedConfig): r""" [`SiglipConfig`] is the configuration class to store the configuration of a [`SiglipModel`]. It is used to instantiate a Siglip model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`SiglipTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`SiglipVisionConfig`]. kwargs (*optional*): Dictionary of keyword arguments. 
Example: ```python >>> from transformers import SiglipConfig, SiglipModel >>> # Initializing a SiglipConfig with google/siglip-base-patch16-224 style configuration >>> configuration = SiglipConfig() >>> # Initializing a SiglipModel (with random weights) from the google/siglip-base-patch16-224 style configuration >>> model = SiglipModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a SiglipConfig from a SiglipTextConfig and a SiglipVisionConfig >>> from transformers import SiglipTextConfig, SiglipVisionConfig >>> # Initializing a SiglipText and SiglipVision configuration >>> config_text = SiglipTextConfig() >>> config_vision = SiglipVisionConfig() >>> config = SiglipConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = "siglip" def __init__(self, text_config=None, vision_config=None, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `SiglipTextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. initializing the `SiglipVisionConfig` with default values.") self.text_config = SiglipTextConfig(**text_config) self.vision_config = SiglipVisionConfig(**vision_config) self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: SiglipVisionConfig, **kwargs): r""" Instantiate a [`SiglipConfig`] (or a derived class) from siglip text model configuration and siglip vision model configuration. Returns: [`SiglipConfig`]: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
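# A minimal usage sketch (not part of the original module) showing how the composite SiglipConfig is
# assembled from its two sub-configs. The overridden hyper-parameters are illustrative assumptions,
# not recommended values; the __main__ guard keeps the demo from running on import.
if __name__ == "__main__":
    text_config = SiglipTextConfig(hidden_size=512, num_hidden_layers=6)
    vision_config = SiglipVisionConfig(image_size=384, patch_size=16)

    # from_text_vision_configs serializes both sub-configs to dicts and rebuilds them inside SiglipConfig
    config = SiglipConfig.from_text_vision_configs(text_config, vision_config)
    print(config.model_type)               # "siglip"
    print(config.text_config.hidden_size)  # 512
    print(config.vision_config.image_size)  # 384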
transformers/src/transformers/models/siglip/configuration_siglip.py/0
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Speech2Text model.""" import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_speech_to_text import Speech2TextConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "Speech2TextConfig" SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/s2t-small-librispeech-asr", # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text ] # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class Conv1dSubsampler(nn.Module): """ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) """ def __init__(self, config): super(Conv1dSubsampler, self).__init__() self.config = config self.num_layers = config.num_conv_layers self.in_channels = config.input_feat_per_channel * config.input_channels self.mid_channels = config.conv_channels self.out_channels = config.d_model self.kernel_sizes = config.conv_kernel_sizes self.conv_layers = nn.ModuleList( nn.Conv1d( self.in_channels if i == 0 else self.mid_channels // 2, self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2, kernel_size=k, stride=2, padding=k // 2, ) for i, k in enumerate(self.kernel_sizes) ) def forward(self, input_features): hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T for conv in self.conv_layers: hidden_states = conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=1) hidden_states = hidden_states.transpose(1, 2).contiguous() # -> T x B x (C x D) return hidden_states class Speech2TextSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = 
None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_() @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() def create_position_ids_from_input_ids( self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0 ): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text class Speech2TextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[Speech2TextConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value SPEECH_TO_TEXT_ATTENTION_CLASSES = {"eager": Speech2TextAttention} # Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT class Speech2TextEncoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT class Speech2TextDecoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = 
config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class Speech2TextPreTrainedModel(PreTrainedModel): config_class = Speech2TextConfig base_model_prefix = "model" main_input_name = "input_features" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ for i in range(self.config.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask): # generate creates 3D attention mask, because of the shape of input_features # convert it to 2D if thats the case if len(attention_mask.shape) > 2: attention_mask = attention_mask[:, :, -1] subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)) bsz = attention_mask.size()[0] attention_mask = torch.zeros( (bsz, feature_vector_length), 
dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long() return attention_mask SPEECH_TO_TEXT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Speech2TextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SPEECH_TO_TEXT_INPUTS_DOCSTRING = r""" Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`] attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class Speech2TextEncoder(Speech2TextPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`Speech2TextEncoderLayer`]. 
Args: config: Speech2TextConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: Speech2TextConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.conv = Conv1dSubsampler(config) self.embed_positions = Speech2TextSinusoidalPositionalEmbedding( self.max_source_positions, embed_dim, self.padding_idx, ) self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_features, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_features (`torch.LongTensor` of shape `(batch_size, sequence_length, feature_size)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`] attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict inputs_embeds = self.conv(input_features) inputs_embeds = self.embed_scale * inputs_embeds # subsample attention mask if necessary if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask) padding_mask = attention_mask.ne(1).long() else: padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device) embed_pos = self.embed_positions(padding_mask) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class Speech2TextDecoder(Speech2TextPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`Speech2TextDecoderLayer`] Args: config: Speech2TextConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: Speech2TextConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = Speech2TextSinusoidalPositionalEmbedding( self.max_target_positions, config.d_model, self.padding_idx, ) self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient 
checkpointing. Setting `use_cache = False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), ( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.", SPEECH_TO_TEXT_START_DOCSTRING, ) class Speech2TextModel(Speech2TextPreTrainedModel): def __init__(self, config: Speech2TextConfig): super().__init__(config) self.encoder = Speech2TextEncoder(config) self.decoder = Speech2TextDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( 
self, input_features: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" Returns: Example: ```python >>> import torch >>> from transformers import Speech2TextModel, AutoFeatureExtractor >>> from datasets import load_dataset >>> model = Speech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... ) >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # downsample encoder attention mask if attention_mask is not None: encoder_attention_mask = self._get_feature_vector_attention_mask( encoder_outputs[0].shape[1], attention_mask ) else: encoder_attention_mask = None # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, 
past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The Speech2Text Model with a language modeling head. Can be used for summarization.", SPEECH_TO_TEXT_START_DOCSTRING, ) class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: Speech2TextConfig): super().__init__(config) self.model = Speech2TextModel(config) self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_features: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import torch >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration >>> from datasets import load_dataset >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... 
) >>> input_features = inputs.input_features >>> generated_ids = model.generate(inputs=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription 'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel' ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_features, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/modeling_speech_to_text.py", "repo_id": "transformers", "token_count": 28147 }
363
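A minimal sketch of the subsampling arithmetic used in the file above: `_get_feat_extract_output_lengths` shrinks the fbank sequence once per convolutional layer, and `_get_feature_vector_attention_mask` rebuilds the padding mask at that reduced length. The helper name, the 584-frame input, and the assumption of two convolutional layers (the usual `num_conv_layers` setting) are illustrative and not part of the file itself.

```python
def subsampled_length(input_length: int, num_conv_layers: int = 2) -> int:
    # Mirrors Speech2TextPreTrainedModel._get_feat_extract_output_lengths:
    # each strided convolution maps a length L to (L - 1) // 2 + 1.
    for _ in range(num_conv_layers):
        input_length = (input_length - 1) // 2 + 1
    return input_length


# e.g. 584 fbank frames -> 292 -> 146 encoder time steps; 146 is the
# feature_vector_length that the subsampled attention mask is built for.
assert subsampled_length(584, num_conv_layers=2) == 146
```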
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Speech processor class for SpeechT5.""" from ...processing_utils import ProcessorMixin class SpeechT5Processor(ProcessorMixin): r""" Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor. [`SpeechT5Processor`] offers all the functionalities of [`SpeechT5FeatureExtractor`] and [`SpeechT5Tokenizer`]. See the docstring of [`~SpeechT5Processor.__call__`] and [`~SpeechT5Processor.decode`] for more information. Args: feature_extractor (`SpeechT5FeatureExtractor`): An instance of [`SpeechT5FeatureExtractor`]. The feature extractor is a required input. tokenizer (`SpeechT5Tokenizer`): An instance of [`SpeechT5Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "SpeechT5FeatureExtractor" tokenizer_class = "SpeechT5Tokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, *args, **kwargs): """ Processes audio and text input, as well as audio and text targets. You can process audio by using the argument `audio`, or process audio targets by using the argument `audio_target`. This forwards the arguments to SpeechT5FeatureExtractor's [`~SpeechT5FeatureExtractor.__call__`]. You can process text by using the argument `text`, or process text labels by using the argument `text_target`. This forwards the arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.__call__`]. Valid input combinations are: - `text` only - `audio` only - `text_target` only - `audio_target` only - `text` and `audio_target` - `audio` and `audio_target` - `text` and `text_target` - `audio` and `text_target` Please refer to the docstring of the above two methods for more information. """ audio = kwargs.pop("audio", None) text = kwargs.pop("text", None) text_target = kwargs.pop("text_target", None) audio_target = kwargs.pop("audio_target", None) sampling_rate = kwargs.pop("sampling_rate", None) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." 
) if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) elif text is not None: inputs = self.tokenizer(text, **kwargs) else: inputs = None if audio_target is not None: targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs) labels = targets["input_values"] elif text_target is not None: targets = self.tokenizer(text_target, **kwargs) labels = targets["input_ids"] else: targets = None if inputs is None: return targets if targets is not None: inputs["labels"] = labels decoder_attention_mask = targets.get("attention_mask") if decoder_attention_mask is not None: inputs["decoder_attention_mask"] = decoder_attention_mask return inputs def pad(self, *args, **kwargs): """ Collates the audio and text inputs, as well as their targets, into a padded batch. Audio inputs are padded by SpeechT5FeatureExtractor's [`~SpeechT5FeatureExtractor.pad`]. Text inputs are padded by SpeechT5Tokenizer's [`~SpeechT5Tokenizer.pad`]. Valid input combinations are: - `input_ids` only - `input_values` only - `labels` only, either log-mel spectrograms or text tokens - `input_ids` and log-mel spectrogram `labels` - `input_values` and text `labels` Please refer to the docstring of the above two methods for more information. """ input_values = kwargs.pop("input_values", None) input_ids = kwargs.pop("input_ids", None) labels = kwargs.pop("labels", None) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs.") if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." ) if input_values is not None: inputs = self.feature_extractor.pad(input_values, *args, **kwargs) elif input_ids is not None: inputs = self.tokenizer.pad(input_ids, **kwargs) else: inputs = None if labels is not None: if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]): targets = self.tokenizer.pad(labels, **kwargs) labels = targets["input_ids"] else: feature_size_hack = self.feature_extractor.feature_size self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins targets = self.feature_extractor.pad(labels, *args, **kwargs) self.feature_extractor.feature_size = feature_size_hack labels = targets["input_values"] else: targets = None if inputs is None: return targets if targets is not None: inputs["labels"] = labels decoder_attention_mask = targets.get("attention_mask") if decoder_attention_mask is not None: inputs["decoder_attention_mask"] = decoder_attention_mask return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs)
transformers/src/transformers/models/speecht5/processing_speecht5.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/processing_speecht5.py", "repo_id": "transformers", "token_count": 3047 }
364
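As a usage sketch of the input/target combinations listed in the `__call__` docstring above: an ASR-style call pairs `audio` with `text_target`, while a TTS-style call pairs `text` with `audio_target`. The checkpoint names and the silent dummy waveform are assumptions for illustration only.

```python
import numpy as np

from transformers import SpeechT5Processor

speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz

# ASR-style: audio input + text target -> "input_values" plus token-id "labels"
asr_processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
asr_batch = asr_processor(
    audio=speech, sampling_rate=16000, text_target="hello world", return_tensors="pt"
)

# TTS-style: text input + audio target -> "input_ids" plus log-mel "labels"
tts_processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
tts_batch = tts_processor(
    text="hello world", audio_target=speech, sampling_rate=16000, return_tensors="pt"
)
```

When the target batch carries an `attention_mask`, the processor also copies it over as `decoder_attention_mask`, as the code above shows.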
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/swin/__init__.py/0
{ "file_path": "transformers/src/transformers/models/swin/__init__.py", "repo_id": "transformers", "token_count": 1111 }
365
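The `__init__` module above only registers names in `_import_structure`; `_LazyModule` resolves them on first attribute access, and the TF entries exist only when TensorFlow is importable. A short sketch of the resulting user-facing behaviour, assuming at least PyTorch is installed:

```python
from transformers.utils import is_tf_available, is_torch_available

if is_torch_available():
    from transformers import SwinConfig, SwinModel  # resolved lazily via _LazyModule

    model = SwinModel(SwinConfig())  # randomly initialized PyTorch Swin

if is_tf_available():
    from transformers import TFSwinModel  # registered only when TensorFlow is installed
```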
# coding=utf-8 # Copyright 2022, Google and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Switch Transformers model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json", } class SwitchTransformersConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to instantiate a SwitchTransformers model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SwitchTransformers [google/switch-base-8](https://huggingface.co/google/switch-base-8) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 32128): Vocabulary size of the SwitchTransformers model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SwitchTransformersModel`]. d_model (`int`, *optional*, defaults to 768): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff (`int`, *optional*, defaults to 2048): Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. expert_capacity (`int`, *optional*, defaults to 64): Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular Transformer. num_layers (`int`, *optional*, defaults to 12): Number of dense hidden layers in the Transformer encoder layer. num_sparse_encoder_layers (`int`, *optional*, defaults to 3): Number of sparse (MoE) dense hidden layers in the Transformer encoder layer. num_decoder_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_sparse_decoder_layers (`int`, *optional*, defaults to 3): Number of sparse (MoE) dense hidden layers in the Transformer decoder layer. num_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_experts (`int`, *optional*, defaults to 8): Number of experts for each SwitchTransformer layer. router_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the router. router_jitter_noise (`float`, *optional*, defaults to 0.01): Amount of noise to add to the router. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. 
It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. router_z_loss_coef (`float`, *optional*, defaults to 0.001): The z loss factor for the total loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). dense_act_fn (`string`, *optional*, defaults to `"relu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. SwitchTransformersv1.1 uses the `"gated-gelu"` feed forward projection. Original SwitchTransformers uses `"relu"`. add_router_probs (`bool`, *optional*, defaults to `False`): Whether to output router probabilities to compute router auxiliary loss. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "switch_transformers" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, dense_act_fn="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_sparse_encoder_layers = num_sparse_encoder_layers self.num_layers = num_layers self.num_decoder_layers = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry self.num_sparse_decoder_layers = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers else: self.encoder_sparse_step = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers else: self.decoder_sparse_step = self.num_decoder_layers # HACK: this will create 0 sparse layers self.num_heads = num_heads self.num_experts = num_experts self.expert_capacity = expert_capacity self.router_bias = router_bias self.router_jitter_noise = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}") self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.use_cache = use_cache self.add_router_probs = add_router_probs self.router_z_loss_coef = router_z_loss_coef self.router_aux_loss_coef = router_aux_loss_coef self.dense_act_fn = dense_act_fn super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
transformers/src/transformers/models/switch_transformers/configuration_switch_transformers.py/0
{ "file_path": "transformers/src/transformers/models/switch_transformers/configuration_switch_transformers.py", "repo_id": "transformers", "token_count": 3660 }
366
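The tail of `__init__` above turns the layer counts into a sparse-layer spacing: every `encoder_sparse_step`-th (resp. `decoder_sparse_step`-th) block is a sparse MoE block, and an unsupported `router_dtype` is rejected. A small sketch with the documented defaults (12 layers, 3 sparse layers per stack); the `"int8"` value is just an arbitrary invalid example:

```python
from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig()  # defaults: num_layers=12, num_sparse_encoder_layers=3

assert config.encoder_sparse_step == 4  # 12 // 3 -> every 4th encoder block is sparse
assert config.decoder_sparse_step == 4  # 12 // 3 -> every 4th decoder block is sparse

try:
    SwitchTransformersConfig(router_dtype="int8")
except ValueError as err:
    print(err)  # `router_dtype` must be one of 'float32', 'float16' or 'bfloat16', ...
```

With `num_sparse_encoder_layers=0` the step falls back to `num_layers`, which, as the in-line comment notes, yields no sparse layers at all.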
# coding=utf-8 # Copyright The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Table Transformer model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class TableTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to instantiate a Table Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer [microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`TableTransformerModel`] can detect in a single image. For COCO, we recommend 100 queries. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, `True`): Whether to use pretrained weights for the backbone. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. 
Examples: ```python >>> from transformers import TableTransformerModel, TableTransformerConfig >>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration >>> configuration = TableTransformerConfig() >>> # Initializing a model from the microsoft/table-transformer-detection style configuration >>> model = TableTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "table-transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } # Copied from transformers.models.detr.configuration_detr.DetrConfig.__init__ def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ): if not use_timm_backbone and use_pretrained_backbone: raise ValueError( "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`" ) if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.") if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) # set timm attributes to None dilation, backbone, use_pretrained_backbone = None, None, None self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model # Copied from transformers.models.detr.configuration_detr.DetrOnnxConfig class TableTransformerOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 @property def default_onnx_opset(self) -> int: return 12
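A brief hypothetical usage sketch (not part of the original file) of the two backbone paths validated in `TableTransformerConfig.__init__` above; `ResNetConfig` and the keyword names come from the code itself, while the specific values are illustrative only.

```python
>>> from transformers import ResNetConfig, TableTransformerConfig

>>> # Default path: a timm "resnet50" backbone selected by name, with pretrained weights.
>>> timm_config = TableTransformerConfig()

>>> # Alternative path: a transformers backbone config. `backbone` must be None and
>>> # `use_pretrained_backbone` False, otherwise the checks above raise a ValueError.
>>> hf_config = TableTransformerConfig(
...     use_timm_backbone=False,
...     use_pretrained_backbone=False,
...     backbone=None,
...     backbone_config=ResNetConfig(out_features=["stage4"]),
... )
>>> hf_config.backbone_config.model_type
'resnet'
```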
transformers/src/transformers/models/table_transformer/configuration_table_transformer.py/0
{ "file_path": "transformers/src/transformers/models/table_transformer/configuration_table_transformer.py", "repo_id": "transformers", "token_count": 4981 }
367
# coding=utf-8 # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch TimeSformer model.""" import collections from typing import Optional, Tuple, Union import torch import torch.nn.functional import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_timesformer import TimesformerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "TimesformerConfig" _CHECKPOINT_FOR_DOC = "facebook/timesformer" TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/timesformer-base-finetuned-k400", # See all TimeSformer models at https://huggingface.co/models?filter=timesformer ] # Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L155 class TimesformerPatchEmbeddings(nn.Module): """Image to Patch Embedding""" def __init__(self, config): super().__init__() image_size = config.image_size patch_size = config.patch_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width) embeddings = self.projection(pixel_values) patch_width = embeddings.size(-1) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, num_frames, patch_width class TimesformerEmbeddings(nn.Module): """ Construct the patch and position embeddings. 
""" def __init__(self, config): super().__init__() embed_dim = config.hidden_size num_frames = config.num_frames drop_rate = config.hidden_dropout_prob attention_type = config.attention_type self.attention_type = attention_type self.patch_embeddings = TimesformerPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches # Positional Embeddings self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) if attention_type != "space_only": self.time_embeddings = nn.Parameter(torch.zeros(1, num_frames, embed_dim)) self.time_drop = nn.Dropout(p=drop_rate) def forward(self, pixel_values): batch_size = pixel_values.shape[0] # create patch embeddings embeddings, num_frames, patch_width = self.patch_embeddings(pixel_values) cls_tokens = self.cls_token.expand(embeddings.size(0), -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # resizing the positional embeddings in case they don't match the input at inference if embeddings.size(1) != self.position_embeddings.size(1): position_embeddings = self.position_embeddings cls_pos_embed = position_embeddings[0, 0, :].unsqueeze(0).unsqueeze(1) other_pos_embed = position_embeddings[0, 1:, :].unsqueeze(0).transpose(1, 2) patch_num = int(other_pos_embed.size(2) ** 0.5) patch_height = embeddings.size(1) // patch_width other_pos_embed = other_pos_embed.reshape(1, embeddings.size(2), patch_num, patch_num) new_pos_embed = nn.functional.interpolate( other_pos_embed, size=(patch_height, patch_width), mode="nearest" ) new_pos_embed = new_pos_embed.flatten(2) new_pos_embed = new_pos_embed.transpose(1, 2) new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1) embeddings = embeddings + new_pos_embed else: embeddings = embeddings + self.position_embeddings embeddings = self.pos_drop(embeddings) # Time Embeddings if self.attention_type != "space_only": cls_tokens = embeddings[:batch_size, 0, :].unsqueeze(1) embeddings = embeddings[:, 1:] _, patch_height, patch_width = embeddings.shape embeddings = ( embeddings.reshape(batch_size, num_frames, patch_height, patch_width) .permute(0, 2, 1, 3) .reshape(batch_size * patch_height, num_frames, patch_width) ) # Resizing time embeddings in case they don't match if num_frames != self.time_embeddings.size(1): time_embeddings = self.time_embeddings.transpose(1, 2) new_time_embeddings = nn.functional.interpolate(time_embeddings, size=(num_frames), mode="nearest") new_time_embeddings = new_time_embeddings.transpose(1, 2) embeddings = embeddings + new_time_embeddings else: embeddings = embeddings + self.time_embeddings embeddings = self.time_drop(embeddings) embeddings = embeddings.view(batch_size, patch_height, num_frames, patch_width).reshape( batch_size, patch_height * num_frames, patch_width ) embeddings = torch.cat((cls_tokens, embeddings), dim=1) return embeddings # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->TimeSformer class TimeSformerDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) # Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L57 class TimesformerSelfAttention(nn.Module): def __init__(self, config: TimesformerConfig): super().__init__() num_heads = config.num_attention_heads qkv_bias = config.qkv_bias attention_dropout_prob = config.attention_probs_dropout_prob self.num_heads = num_heads head_dim = config.hidden_size // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attention_dropout_prob) def forward(self, hidden_states, output_attentions: bool = False): batch_size, hidden_size, num_channels = hidden_states.shape qkv = ( self.qkv(hidden_states) .reshape(batch_size, hidden_size, 3, self.num_heads, num_channels // self.num_heads) .permute(2, 0, 3, 1, 4) ) query, key, value = qkv[0], qkv[1], qkv[2] attention_probs = (query @ key.transpose(-2, -1)) * self.scale attention_probs = attention_probs.softmax(dim=-1) attention_probs = self.attn_drop(attention_probs) context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, hidden_size, num_channels) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TimesformerSelfOutput(nn.Module): """ The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: TimesformerConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class TimeSformerAttention(nn.Module): def __init__(self, config: TimesformerConfig) -> None: super().__init__() self.attention = TimesformerSelfAttention(config) self.output = TimesformerSelfOutput(config) def forward( self, hidden_states: torch.Tensor, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, output_attentions) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L39 class TimesformerIntermediate(nn.Module): def __init__(self, config: TimesformerConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class TimesformerOutput(nn.Module): def __init__(self, config: TimesformerConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L89 class TimesformerLayer(nn.Module): def __init__(self, config: TimesformerConfig, layer_index: int) -> None: super().__init__() attention_type = config.attention_type drop_path_rates = [ x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers) ] # stochastic depth decay rule drop_path_rate = drop_path_rates[layer_index] self.drop_path = TimeSformerDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.attention = TimeSformerAttention(config) self.intermediate = TimesformerIntermediate(config) self.output = TimesformerOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.config = config self.attention_type = attention_type if attention_type not in ["divided_space_time", "space_only", "joint_space_time"]: raise ValueError("Unknown attention type: {}".format(attention_type)) # Temporal Attention Parameters if self.attention_type == "divided_space_time": self.temporal_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.temporal_attention = TimeSformerAttention(config) self.temporal_dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor, output_attentions: bool = 
False): num_frames = self.config.num_frames num_patch_width = self.config.image_size // self.config.patch_size batch_size = hidden_states.shape[0] num_spatial_tokens = (hidden_states.size(1) - 1) // num_frames num_patch_height = num_spatial_tokens // num_patch_width if self.attention_type in ["space_only", "joint_space_time"]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), output_attentions=output_attentions ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights hidden_states = hidden_states + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output) layer_output = hidden_states + self.drop_path(layer_output) outputs = (layer_output,) + outputs return outputs elif self.attention_type == "divided_space_time": # Temporal temporal_embedding = hidden_states[:, 1:, :] temporal_embedding = temporal_embedding.reshape( batch_size, num_patch_height, num_patch_width, num_frames, temporal_embedding.shape[2] ).reshape(batch_size * num_patch_height * num_patch_width, num_frames, temporal_embedding.shape[2]) temporal_attention_outputs = self.temporal_attention( self.temporal_layernorm(temporal_embedding), ) attention_output = temporal_attention_outputs[0] residual_temporal = self.drop_path(attention_output) residual_temporal = residual_temporal.reshape( batch_size, num_patch_height, num_patch_width, num_frames, residual_temporal.shape[2] ).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_temporal.shape[2]) residual_temporal = self.temporal_dense(residual_temporal) temporal_embedding = hidden_states[:, 1:, :] + residual_temporal # Spatial init_cls_token = hidden_states[:, 0, :].unsqueeze(1) cls_token = init_cls_token.repeat(1, num_frames, 1) cls_token = cls_token.reshape(batch_size * num_frames, 1, cls_token.shape[2]) spatial_embedding = temporal_embedding spatial_embedding = ( spatial_embedding.reshape( batch_size, num_patch_height, num_patch_width, num_frames, spatial_embedding.shape[2] ) .permute(0, 3, 1, 2, 4) .reshape(batch_size * num_frames, num_patch_height * num_patch_width, spatial_embedding.shape[2]) ) spatial_embedding = torch.cat((cls_token, spatial_embedding), 1) spatial_attention_outputs = self.attention( self.layernorm_before(spatial_embedding), output_attentions=output_attentions ) attention_output = spatial_attention_outputs[0] outputs = spatial_attention_outputs[1:] # add self attentions if we output attention weights residual_spatial = self.drop_path(attention_output) # Taking care of CLS token cls_token = residual_spatial[:, 0, :] cls_token = cls_token.reshape(batch_size, num_frames, cls_token.shape[1]) cls_token = torch.mean(cls_token, 1, True) # averaging for every frame residual_spatial = residual_spatial[:, 1:, :] residual_spatial = ( residual_spatial.reshape( batch_size, num_frames, num_patch_height, num_patch_width, residual_spatial.shape[2] ) .permute(0, 2, 3, 1, 4) .reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_spatial.shape[2]) ) residual = residual_spatial hidden_states = temporal_embedding # Mlp hidden_states = torch.cat((init_cls_token, hidden_states), 1) + torch.cat((cls_token, residual), 1) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output) layer_output = hidden_states + self.drop_path(layer_output) 
outputs = (layer_output,) + outputs return outputs class TimesformerEncoder(nn.Module): def __init__(self, config: TimesformerConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([TimesformerLayer(config, ind) for ind in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, output_attentions, ) else: layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class TimesformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TimesformerConfig base_model_prefix = "timesformer" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): nn.init.trunc_normal_(module.weight, std=self.config.initializer_range) if module.bias is not None: nn.init.constant_(module.bias, 0) elif isinstance(module, nn.LayerNorm): nn.init.constant_(module.bias, 0) nn.init.constant_(module.weight, 1.0) elif isinstance(module, TimesformerEmbeddings): nn.init.trunc_normal_(module.cls_token, std=self.config.initializer_range) nn.init.trunc_normal_(module.position_embeddings, std=self.config.initializer_range) module.patch_embeddings.apply(self._init_weights) TIMESFORMER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TimesformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TIMESFORMER_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`VideoMAEImageProcessor.preprocess`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare TimeSformer Model transformer outputting raw hidden-states without any specific head on top.", TIMESFORMER_START_DOCSTRING, ) class TimesformerModel(TimesformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = TimesformerEmbeddings(config) self.encoder = TimesformerEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TIMESFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Examples: ```python >>> import av >>> import numpy as np >>> from transformers import AutoImageProcessor, TimesformerModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`List[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`List[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... 
) >>> container = av.open(file_path) >>> # sample 8 frames >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=4, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base") >>> model = TimesformerModel.from_pretrained("facebook/timesformer-base-finetuned-k400") >>> # prepare video for the model >>> inputs = image_processor(list(video), return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 1569, 768] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if self.layernorm is not None: sequence_output = self.layernorm(sequence_output) if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """TimeSformer Model transformer with a video classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.""", TIMESFORMER_START_DOCSTRING, ) class TimesformerForVideoClassification(TimesformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.timesformer = TimesformerModel(config) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TIMESFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> import av >>> import torch >>> import numpy as np >>> from transformers import AutoImageProcessor, TimesformerForVideoClassification >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`List[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... 
end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`List[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 8 frames >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") >>> model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400") >>> inputs = image_processor(list(video), return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) ... logits = outputs.logits >>> # model predicts one of the 400 Kinetics-400 classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) eating spaghetti ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.timesformer( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0][:, 0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
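A minimal hypothetical sketch (not from the original file) that runs a small, randomly initialized TimeSformer on a dummy clip, illustrating the `(batch_size, num_frames, num_channels, height, width)` input layout documented above; the tiny config values are assumptions chosen only to keep the example fast.

```python
>>> import torch
>>> from transformers import TimesformerConfig, TimesformerForVideoClassification

>>> # Tiny, randomly initialized model (assumed sizes, for shape checking only).
>>> config = TimesformerConfig(
...     image_size=32,
...     patch_size=16,
...     num_frames=4,
...     hidden_size=64,
...     num_hidden_layers=2,
...     num_attention_heads=4,
...     intermediate_size=128,
...     attention_type="divided_space_time",
...     num_labels=10,
... )
>>> model = TimesformerForVideoClassification(config)

>>> # One clip of 4 RGB frames at 32x32: (batch_size, num_frames, num_channels, height, width).
>>> pixel_values = torch.randn(1, 4, 3, 32, 32)
>>> with torch.no_grad():
...     logits = model(pixel_values).logits
>>> list(logits.shape)
[1, 10]
```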
transformers/src/transformers/models/timesformer/modeling_timesformer.py/0
{ "file_path": "transformers/src/transformers/models/timesformer/modeling_timesformer.py", "repo_id": "transformers", "token_count": 15092 }
368
# coding=utf-8 # Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License=, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing=, software # distributed under the License is distributed on an "AS IS" BASIS=, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TVP model configuration""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) TVP_PRETRAINED_CONFIG_ARCHIVE_MAP = { "Intel/tvp-base": "https://huggingface.co/Intel/tvp-base/resolve/main/config.json", } class TvpConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate an Tvp model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Tvp [Intel/tvp-base](https://huggingface.co/Intel/tvp-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. distance_loss_weight (`float`, *optional*, defaults to 1.0): The weight of distance loss. duration_loss_weight (`float`, *optional*, defaults to 0.1): The weight of duration loss. visual_prompter_type (`str`, *optional*, defaults to `"framepad"`): Visual prompt type. The type of padding. Framepad means padding on each frame. Should be one of "framepad" or "framedownpad" visual_prompter_apply (`str`, *optional*, defaults to `"replace"`): The way of applying visual prompt. Replace means use the value of prompt to change the original value in visual inputs. Should be one of "replace", or "add", or "remove". visual_prompt_size (`int`, *optional*, defaults to 96): The size of visual prompt. max_img_size (`int`, *optional*, defaults to 448): The maximum size of frame. num_frames (`int`, *optional*, defaults to 48): The number of frames extracted from a video. vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the Tvp text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`TvpModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers. 
intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). max_grid_col_position_embeddings (`int`, *optional*, defaults to 100): The largest number of horizontal patches from a video frame. max_grid_row_position_embeddings (`int`, *optional*, defaults to 100): The largest number of vertical patches from a video frame. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability of hidden layers. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability of attention layers. """ model_type = "tvp" def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, distance_loss_weight=1.0, duration_loss_weight=0.1, visual_prompter_type="framepad", visual_prompter_apply="replace", visual_prompt_size=96, max_img_size=448, num_frames=48, vocab_size=30522, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, max_position_embeddings=512, max_grid_col_position_embeddings=100, max_grid_row_position_embeddings=100, hidden_dropout_prob=0.1, hidden_act="gelu", layer_norm_eps=1e-12, initializer_range=0.02, attention_probs_dropout_prob=0.1, **kwargs, ): super().__init__(**kwargs) if use_pretrained_backbone: raise ValueError("Pretrained backbones are not supported yet.") if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.distance_loss_weight = distance_loss_weight self.duration_loss_weight = duration_loss_weight self.visual_prompter_type = visual_prompter_type self.visual_prompter_apply = visual_prompter_apply self.visual_prompt_size = visual_prompt_size self.max_img_size = max_img_size self.num_frames = num_frames self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.max_grid_col_position_embeddings = max_grid_col_position_embeddings self.max_grid_row_position_embeddings = max_grid_row_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_dropout_prob = hidden_dropout_prob self.hidden_act = hidden_act self.initializer_range = initializer_range self.attention_probs_dropout_prob = attention_probs_dropout_prob @classmethod def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): """Instantiate a [`TvpConfig`] (or a derived class) from a pre-trained backbone model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. Returns: [`TvpConfig`]: An instance of a configuration object """ return cls(backbone_config=backbone_config, **kwargs) def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: output["backbone_config"] = self.backbone_config.to_dict() output["model_type"] = self.__class__.model_type return output
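A short hypothetical sketch (not part of the original file) of the `from_backbone_config` and `to_dict` helpers defined above; `ResNetConfig` is assumed only as a convenient backbone and the keyword values are illustrative.

```python
>>> from transformers import ResNetConfig, TvpConfig

>>> # Build the config from an explicit backbone configuration.
>>> backbone_config = ResNetConfig(out_features=["stage4"])
>>> config = TvpConfig.from_backbone_config(backbone_config, visual_prompt_size=96)

>>> # `to_dict` serializes the nested backbone config and records the model type.
>>> serialized = config.to_dict()
>>> serialized["model_type"]
'tvp'
>>> serialized["backbone_config"]["model_type"]
'resnet'
```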
transformers/src/transformers/models/tvp/configuration_tvp.py/0
{ "file_path": "transformers/src/transformers/models/tvp/configuration_tvp.py", "repo_id": "transformers", "token_count": 3686 }
369
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch UniSpeechSat model.""" import math import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_peft_available, logging, replace_return_docstrings, ) from .configuration_unispeech_sat import UniSpeechSatConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "UniSpeechSatConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/unispeech-sat-base-100h-libri-ft" _EXPECTED_OUTPUT_SHAPE = [1, 292, 768] # CTC docstring _CTC_EXPECTED_OUTPUT = "'MISTER QUILDER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'" _CTC_EXPECTED_LOSS = 39.88 # Frame class docstring _FRAME_CLASS_CHECKPOINT = "microsoft/unispeech-sat-base-plus-sd" _FRAME_EXPECTED_OUTPUT = [0, 0] # Speaker Verification docstring _XVECTOR_CHECKPOINT = "microsoft/unispeech-sat-base-plus-sv" _XVECTOR_EXPECTED_OUTPUT = 0.97 UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = [ # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat ] @dataclass class UniSpeechSatForPreTrainingOutput(ModelOutput): """ Output type of [`UniSpeechSatForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None projected_states: torch.FloatTensor = None projected_quantized_states: torch.FloatTensor = None codevector_perplexity: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeechSat class UniSpeechSatNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeechSat class UniSpeechSatLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeechSat class UniSpeechSatGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) 
self.activation = ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeechSat class UniSpeechSatPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = UniSpeechSatSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeechSat class UniSpeechSatSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeechSat class UniSpeechSatFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [UniSpeechSatGroupNormConvLayer(config, layer_id=0)] + [ UniSpeechSatNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ UniSpeechSatLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func( conv_layer.__call__, hidden_states, ) else: 
                hidden_states = conv_layer(hidden_states)

        return hidden_states


class UniSpeechSatFeatureExtractor(UniSpeechSatFeatureEncoder):
    def __init__(self, config):
        super().__init__(config)
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )


# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeechSat
class UniSpeechSatFeatureProjection(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states, norm_hidden_states


# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeechSat
class UniSpeechSatAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[UniSpeechSatConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
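        # (bsz, tgt_len, num_heads, head_dim) is flattened back to (bsz, tgt_len, embed_dim)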
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeechSat class UniSpeechSatFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeechSat class UniSpeechSatEncoderLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->UniSpeechSat class UniSpeechSatAttnAdapterLayer(nn.Module): def __init__(self, config): """ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. 
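
        Concretely, the adapter is a LayerNorm followed by a bottleneck: a down-projection from `hidden_size` to
        `adapter_attn_dim`, a ReLU, and an up-projection back to `hidden_size`.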
""" super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeechSat class UniSpeechSatEncoderLayerStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.attention = UniSpeechSatAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, "adapter_attn_dim", None) is not None: self.adapter_layer = UniSpeechSatAttnAdapterLayer(config) else: self.adapter_layer = None def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeechSat class UniSpeechSatEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechSatEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) 
hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeechSat class UniSpeechSatEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [UniSpeechSatEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: 
layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class UniSpeechSatGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`" f" {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs, mask=None): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax( hidden_states.float(), tau=self.temperature, hard=True ).type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity class UniSpeechSatPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = UniSpeechSatConfig base_model_prefix = "unispeech_sat" main_input_name = "input_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, UniSpeechSatGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, UniSpeechSatPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, UniSpeechSatFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask UNISPEECH_SAT_START_DOCSTRING = r""" UniSpeechSat was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`UniSpeechSatConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UNISPEECH_SAT_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) <Tip warning={true}> `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as [microsoft/unispeech-sat-base-100h-libri-ft](https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. </Tip> output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top.", UNISPEECH_SAT_START_DOCSTRING, ) class UniSpeechSatModel(UniSpeechSatPreTrainedModel): def __init__(self, config: UniSpeechSatConfig): super().__init__(config) self.config = config self.feature_extractor = UniSpeechSatFeatureEncoder(config) self.feature_projection = UniSpeechSatFeatureProjection(config) self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = UniSpeechSatEncoderStableLayerNorm(config) else: self.encoder = UniSpeechSatEncoder(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
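
        When `mask_time_indices` is not provided, masking is only applied during training, and only if
        `config.apply_spec_augment` is `True`.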
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings("""UniSpeechSat Model with a quantizer and `VQ` head on top.""", UNISPEECH_SAT_START_DOCSTRING) class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel): def 
__init__(self, config: UniSpeechSatConfig):
        super().__init__(config)
        self.unispeech_sat = UniSpeechSatModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)

        self.quantizer = UniSpeechSatGumbelVectorQuantizer(config)

        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)

        self.dropout = nn.Dropout(config.final_dropout)

        self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim)
        self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim))
        self.label_embeddings_concat.data.zero_()

        self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        if self.config.do_stable_layer_norm:
            self.layer_norm_for_extract.requires_grad = False

        # Initialize weights and apply final processing
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        """
        self.quantizer.temperature = temperature

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters
        will not be updated during training.
        """
        self.unispeech_sat.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute logits for contrastive loss using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
""" target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1) logits = logits.type_as(target_features) # apply temperature logits = logits / temperature return logits @add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=UniSpeechSatForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, UniSpeechSatForPreTrainingOutput]: r""" Returns: Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining >>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base") >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base") >>> # TODO: Add full pretraining example ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) transformer_features = outputs[0] # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1]) # TODO(PVP) - add pretraining logic and add to tests logits = extract_features loss = quantized_features = codevector_perplexity = None # layer normalization (has no effect when `config.do_stable_layer_norm == False`) # extract_features = self.layer_norm_for_extract(extract_features) # quantized_features, codevector_perplexity = self.quantizer(extract_features) # # project quantized features twice # quantized_features = self.project_q(quantized_features) # quantized_features = self.project_hid(quantized_features) # # loss = None # logits = quantized_features if not return_dict: if loss is not None: return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return UniSpeechSatForPreTrainingOutput( loss=loss, logits=logits, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", UNISPEECH_SAT_START_DOCSTRING, """ target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechSatForCTC`] with adapters. Uses 'eng' by default. 
""", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat, WAV_2_VEC_2->UNISPEECH_SAT class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel): def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `UniSpeechSatForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to # correctly load adapter layers for UniSpeechSat so that we do not have to introduce a new API to # [`PreTrainedModel`]. While slightly hacky, UniSpeechSat never has to tie input and output embeddings, so that it is # ok to repurpose this function here. target_lang = self.target_lang if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None: raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.") elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.unispeech_sat.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. 
""", UNISPEECH_SAT_START_DOCSTRING, ) class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)" ) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_extractor def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->unispeech_sat def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_base_model with wav2vec2->unispeech_sat def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech_sat.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ UniSpeech-SAT Model with a frame classification head on top for tasks like Speaker Diarization. """, UNISPEECH_SAT_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat, WAV_2_VEC_2->UNISPEECH_SAT class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)" ) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.unispeech_sat.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_FRAME_CLASS_CHECKPOINT, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_FRAME_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer class TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if is_peft_available(): from peft.tuners.lora import 
LoraLayer if isinstance(self.kernel, LoraLayer): warnings.warn( "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. " "You should exclude TDNNLayer from LoRA's target modules.", ) # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up hidden_states = hidden_states.transpose(1, 2) weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2) hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.activation(hidden_states) return hidden_states @add_start_docstrings( """ UniSpeech-SAT Model with an XVector feature extraction head on top for tasks like Speaker Verification. """, UNISPEECH_SAT_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat, WAV_2_VEC_2->UNISPEECH_SAT class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel): def __init__(self, config): super().__init__(config) self.unispeech_sat = UniSpeechSatModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech_sat.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.unispeech_sat.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_XVECTOR_CHECKPOINT, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_XVECTOR_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech_sat( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py/0
{ "file_path": "transformers/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py", "repo_id": "transformers", "token_count": 36855 }
370
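
In practice the `embeddings` field of `XVectorOutput` is what gets compared between utterances for speaker verification. A hedged usage sketch, assuming an x-vector fine-tuned checkpoint such as `microsoft/unispeech-sat-base-plus-sv` and random waveforms standing in for real 16 kHz audio; the decision threshold on the cosine similarity is dataset-dependent:

```python
import numpy as np
import torch
from transformers import AutoFeatureExtractor, UniSpeechSatForXVector

checkpoint = "microsoft/unispeech-sat-base-plus-sv"  # assumed x-vector checkpoint
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = UniSpeechSatForXVector.from_pretrained(checkpoint)
model.eval()

# two dummy 1-second waveforms at 16 kHz standing in for real utterances
waveforms = [np.random.randn(16000), np.random.randn(16000)]
inputs = feature_extractor(waveforms, sampling_rate=16000, padding=True, return_tensors="pt")

with torch.no_grad():
    embeddings = model(**inputs).embeddings  # shape: (2, config.xvector_output_dim)

# cosine similarity between the two speaker embeddings; whether this counts as
# "same speaker" depends on a dataset-dependent threshold
embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
print(float(similarity))
```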
# coding=utf-8 # Copyright 2022 Multimedia Computing Group, Nanjing University and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch VideoMAE (masked autoencoder) model.""" import collections.abc import math from copy import deepcopy from dataclasses import dataclass from typing import Optional, Set, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from .configuration_videomae import VideoMAEConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "VideoMAEConfig" _CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base" VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "MCG-NJU/videomae-base", # See all VideoMAE models at https://huggingface.co/models?filter=videomae ] @dataclass class VideoMAEDecoderOutput(ModelOutput): """ Class for VideoMAEDecoder's outputs, with potential hidden states and attentions. Args: logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`): Pixel reconstruction logits. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class VideoMAEForPreTrainingOutput(ModelOutput): """ Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`): Pixel reconstruction loss. logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`): Pixel reconstruction logits. 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None # sin-cos position encoding # https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 def get_sinusoid_encoding_table(n_position, d_hid): """Sinusoid position encoding table""" # TODO: make it with torch instead of numpy def get_position_angle_vec(position): return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 return torch.FloatTensor(sinusoid_table).unsqueeze(0) class VideoMAEEmbeddings(nn.Module): """ Construct the patch and position embeddings. """ def __init__(self, config): super().__init__() self.patch_embeddings = VideoMAEPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches # fixed sin-cos embedding self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size) self.config = config def forward(self, pixel_values, bool_masked_pos): # create patch embeddings embeddings = self.patch_embeddings(pixel_values) # add position embeddings embeddings = embeddings + self.position_embeddings.type_as(embeddings).to(embeddings.device).clone().detach() # only keep visible patches # ~bool_masked_pos means visible if bool_masked_pos is not None: batch_size, _, num_channels = embeddings.shape embeddings = embeddings[~bool_masked_pos] embeddings = embeddings.reshape(batch_size, -1, num_channels) return embeddings class VideoMAEPatchEmbeddings(nn.Module): """ Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder. The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width // patch_size). 
""" def __init__(self, config): super().__init__() image_size = config.image_size patch_size = config.patch_size num_channels = config.num_channels hidden_size = config.hidden_size num_frames = config.num_frames tubelet_size = config.tubelet_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) self.image_size = image_size self.patch_size = patch_size self.tubelet_size = int(tubelet_size) num_patches = ( (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size) ) self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv3d( in_channels=num_channels, out_channels=hidden_size, kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), stride=(self.tubelet_size, patch_size[0], patch_size[1]), ) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) # permute to (batch_size, num_channels, num_frames, height, width) pixel_values = pixel_values.permute(0, 2, 1, 3, 4) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings class VideoMAESelfAttention(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) if config.qkv_bias: self.q_bias = nn.Parameter(torch.zeros(self.all_head_size)) self.v_bias = nn.Parameter(torch.zeros(self.all_head_size)) else: self.q_bias = None self.v_bias = None self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias) values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias) queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias) key_layer = self.transpose_for_scores(keys) value_layer = self.transpose_for_scores(values) query_layer = self.transpose_for_scores(queries) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VideoMAE class VideoMAESelfOutput(nn.Module): """ The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->VideoMAE class VideoMAEAttention(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.attention = VideoMAESelfAttention(config) self.output = VideoMAESelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->VideoMAE class VideoMAEIntermediate(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput ViT->VideoMAE class VideoMAEOutput(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->VideoMAE class VideoMAELayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = VideoMAEAttention(config) self.intermediate = VideoMAEIntermediate(config) self.output = VideoMAEOutput(config) self.layernorm_before = 
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in VideoMAE, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in VideoMAE, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VideoMAE class VideoMAEEncoder(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class VideoMAEPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = VideoMAEConfig base_model_prefix = "videomae" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv3d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) VIDEOMAE_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VideoMAEConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VIDEOMAE_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`VideoMAEImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare VideoMAE Model transformer outputting raw hidden-states without any specific head on top.", VIDEOMAE_START_DOCSTRING, ) class VideoMAEModel(VideoMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = VideoMAEEmbeddings(config) self.encoder = VideoMAEEncoder(config) if config.use_mean_pooling: self.layernorm = None else: self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`. Returns: Examples: ```python >>> import av >>> import numpy as np >>> from transformers import AutoImageProcessor, VideoMAEModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... 
indices (`List[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`List[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 16 frames >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base") >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base") >>> # prepare video for the model >>> inputs = image_processor(list(video), return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 1568, 768] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, bool_masked_pos) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if self.layernorm is not None: sequence_output = self.layernorm(sequence_output) if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class VideoMAEDecoder(nn.Module): def __init__(self, config, num_patches): super().__init__() decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2 decoder_config = deepcopy(config) decoder_config.hidden_size = config.decoder_hidden_size 
decoder_config.num_hidden_layers = config.decoder_num_hidden_layers decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size self.decoder_layers = nn.ModuleList( [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)] ) self.norm = nn.LayerNorm(config.decoder_hidden_size) self.head = ( nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity() ) self.gradient_checkpointing = False self.config = config def forward( self, hidden_states, return_token_num, output_attentions=False, output_hidden_states=False, return_dict=True, ): # apply Transformer layers (blocks) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, None, output_attentions, ) else: layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if return_token_num > 0: hidden_states = hidden_states[:, -return_token_num:] # predictor projection hidden_states = self.norm(hidden_states) logits = self.head(hidden_states) if not return_dict: return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None) return VideoMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions) @add_start_docstrings( "The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.", VIDEOMAE_START_DOCSTRING, ) class VideoMAEForPreTraining(VideoMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.videomae = VideoMAEModel(config) self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.position_embeddings = get_sinusoid_encoding_table( self.videomae.embeddings.num_patches, config.decoder_hidden_size ) self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, bool_masked_pos: torch.BoolTensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, VideoMAEForPreTrainingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`. 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining >>> import numpy as np >>> import torch >>> num_frames = 16 >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224))) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base") >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base") >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2 >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.videomae( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.encoder_to_decoder( sequence_output ) # [batch_size, num_visible_patches, decoder_hidden_size] batch_size, seq_len, num_channels = sequence_output.shape # we don't unshuffle the correct visible token order, but shuffle the position embeddings accordingly. if bool_masked_pos is None: raise ValueError("One must provided a boolean mask ") expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values) expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach() pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels) pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels) # [batch_size, num_patches, decoder_hidden_size] x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1) # [batch_size, num_masked_patches, num_channels * patch_size * patch_size] decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1]) logits = decoder_outputs.logits loss = None with torch.no_grad(): # calculate the labels to be predicted if self.config.num_channels != 3: # Can't unnormalize with default means/stds frames = pixel_values else: # first, unnormalize the frames device = pixel_values.device dtype = pixel_values.dtype mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None] std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None] frames = pixel_values * std + mean # in [0, 1] batch_size, time, num_channels, height, width = frames.shape tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size if self.config.norm_pix_loss: # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size) frames = frames.view( batch_size, time // tubelet_size, tubelet_size, num_channels, height // patch_size, patch_size, width // patch_size, patch_size, ) # step 2: move dimensions to concatenate: frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() # step 3: concatenate: frames = frames.view( batch_size, time // tubelet_size * height // patch_size * width // patch_size, tubelet_size * patch_size * patch_size, num_channels, ) # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08. 
frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / ( frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6 ) # step 5: reshape to (batch_size, T//ts * H//ps * W//ps, ts * ps * ps * C) videos_patch = frames_norm.view( batch_size, time // tubelet_size * height // patch_size * width // patch_size, tubelet_size * patch_size * patch_size * num_channels, ) else: if self.config.num_channels != 3: raise ValueError( "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False." ) # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size) frames = frames.view( batch_size, time // tubelet_size, tubelet_size, num_channels, height // patch_size, patch_size, width // patch_size, patch_size, ) # step 2: move dimensions to concatenate: (batch_size, T//ts, H//ps, W//ps, ts, ps, ps, C) frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() # step 3: concatenate videos_patch = frames.view( batch_size, time // tubelet_size * height // patch_size * width // patch_size, tubelet_size * patch_size * patch_size * num_channels, ) batch_size, _, num_channels = videos_patch.shape labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels) loss_fct = MSELoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return VideoMAEForPreTrainingOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden states of all tokens) e.g. for ImageNet.""", VIDEOMAE_START_DOCSTRING, ) class VideoMAEForVideoClassification(VideoMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.videomae = VideoMAEModel(config) # Classifier head self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> import av >>> import torch >>> import numpy as np >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`List[int]`): List of frame indices to decode. ... Returns: ... 
result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`List[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 16 frames >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") >>> inputs = image_processor(list(video), return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) ... 
logits = outputs.logits >>> # model predicts one of the 400 Kinetics-400 classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) eating spaghetti ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.videomae( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] if self.fc_norm is not None: sequence_output = self.fc_norm(sequence_output.mean(1)) else: sequence_output = sequence_output[:, 0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
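
Both `VideoMAEEmbeddings.forward` and `VideoMAEForPreTraining.forward` rely on the same indexing trick: a boolean mask over the flattened tubelet-patch sequence in which every video masks the same number of patches, so that `embeddings[~bool_masked_pos]` can be reshaped back to `(batch_size, num_visible, hidden_size)`. A small sketch of that bookkeeping, assuming the default base-model geometry (224-pixel frames, 16-pixel patches, 16 frames, tubelet size 2) and an assumed high pre-training masking ratio:

```python
import torch

# assumed default geometry of the base checkpoint
image_size, patch_size, num_frames, tubelet_size, hidden_size = 224, 16, 16, 2, 768
seq_length = (num_frames // tubelet_size) * (image_size // patch_size) ** 2  # 1568

batch_size = 2
mask_ratio = 0.9  # assumed pre-training masking ratio
num_masked = int(mask_ratio * seq_length)  # 1411

# every video must mask the same number of patches, but which patches are masked
# can differ per video
bool_masked_pos = torch.zeros(batch_size, seq_length, dtype=torch.bool)
for i in range(batch_size):
    perm = torch.randperm(seq_length)
    bool_masked_pos[i, perm[:num_masked]] = True

# mirrors VideoMAEEmbeddings.forward: drop masked patches, keep the visible ones
embeddings = torch.randn(batch_size, seq_length, hidden_size)
visible = embeddings[~bool_masked_pos].reshape(batch_size, -1, hidden_size)
print(visible.shape)  # torch.Size([2, 157, 768]) since 1568 - 1411 = 157
```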
transformers/src/transformers/models/videomae/modeling_videomae.py/0
{ "file_path": "transformers/src/transformers/models/videomae/modeling_videomae.py", "repo_id": "transformers", "token_count": 20402 }
371
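
The `norm_pix_loss` branch of `VideoMAEForPreTraining.forward` builds its regression targets by cutting the (unnormalized) frames into tubelet patches and standardizing each patch with its own per-channel mean and variance, following the step 1–5 comments in the code. A compact sketch of that target construction under the same assumed geometry; the helper name `patchify_targets` is made up for illustration:

```python
import torch


def patchify_targets(frames: torch.Tensor, tubelet_size: int = 2, patch_size: int = 16,
                     norm_pix_loss: bool = True) -> torch.Tensor:
    """frames: (batch_size, time, channels, height, width), already unnormalized to [0, 1]."""
    b, t, c, h, w = frames.shape
    # step 1: split time by tubelet_size, height and width by patch_size
    frames = frames.view(b, t // tubelet_size, tubelet_size, c,
                         h // patch_size, patch_size, w // patch_size, patch_size)
    # steps 2-3: group each patch's pixels together -> (b, num_patches, ts*ps*ps, c)
    frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
    frames = frames.view(b, -1, tubelet_size * patch_size * patch_size, c)
    if norm_pix_loss:
        # step 4: normalize each patch over its own pixels, per channel
        frames = (frames - frames.mean(dim=-2, keepdim=True)) / (
            frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
        )
    # step 5: flatten to (b, num_patches, ts*ps*ps*c)
    return frames.view(b, frames.shape[1], -1)


targets = patchify_targets(torch.rand(1, 16, 3, 224, 224))
print(targets.shape)  # torch.Size([1, 1568, 1536]) = (1, 8*14*14, 2*16*16*3)
```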
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Vision-Encoder-Text-Decoder architectures""" import gc import os import tempfile from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...configuration_utils import PretrainedConfig from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..auto.configuration_auto import AutoConfig from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig # Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "VisionEncoderDecoderConfig" VISION_ENCODER_DECODER_START_DOCSTRING = r""" This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like image captioning. The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. Additionally, in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) it is shown how leveraging large pretrained vision models for optical character recognition (OCR) yields a significant performance improvement. After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using an image processor (e.g. if you use ViT as the encoder, you should use [`AutoImageProcessor`]). See [`ViTImageProcessor.__call__`] for details. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (`tuple(torch.FloatTensor)`, *optional*): This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss for the decoder. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. """ @add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING) class VisionEncoderDecoderModel(PreTrainedModel): r""" [`VisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one of the base vision model classes of the library as encoder and another one as decoder when created with the :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder. """ config_class = VisionEncoderDecoderConfig base_model_prefix = "vision_encoder_decoder" main_input_name = "pixel_values" supports_gradient_checkpointing = True def __init__( self, config: Optional[PretrainedConfig] = None, encoder: Optional[PreTrainedModel] = None, decoder: Optional[PreTrainedModel] = None, ): if config is None and (encoder is None or decoder is None): raise ValueError("Either a configuration or an encoder and a decoder has to be provided.") if config is None: config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) else: if not isinstance(config, self.config_class): raise ValueError(f"Config: {config} has to be of type {self.config_class}") if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError( "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" " `config.encoder.hidden_size`." 
) # initialize with config # make sure input & output embeddings is not tied config.tie_word_embeddings = False super().__init__(config) if encoder is None: encoder = AutoModel.from_config(config.encoder) if decoder is None: decoder = AutoModelForCausalLM.from_config(config.decoder) self.encoder = encoder self.decoder = decoder if self.encoder.config.to_dict() != self.config.encoder.to_dict(): logger.warning( f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:" f" {self.config.encoder}" ) if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning( f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:" f" {self.config.decoder}" ) # make sure that the individual model's config refers to the shared config # so that the updates to the config will be synced self.encoder.config = self.config.encoder self.decoder.config = self.config.decoder # encoder outputs might need to be projected to different dimension for decoder if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size) if self.encoder.get_output_embeddings() is not None: raise ValueError( f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" ) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Example: ```python >>> from transformers import VisionEncoderDecoderModel, AutoImageProcessor, AutoTokenizer >>> from PIL import Image >>> import requests >>> image_processor = AutoImageProcessor.from_pretrained("ydshieh/vit-gpt2-coco-en") >>> decoder_tokenizer = AutoTokenizer.from_pretrained("ydshieh/vit-gpt2-coco-en") >>> model = VisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> img = Image.open(requests.get(url, stream=True).raw) >>> pixel_values = image_processor(images=img, return_tensors="pt").pixel_values # Batch size 1 >>> output_ids = model.generate( ... pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True ... ).sequences >>> preds = decoder_tokenizer.batch_decode(output_ids, skip_special_tokens=True) >>> preds = [pred.strip() for pred in preds] >>> assert preds == ["a cat laying on top of a couch next to another cat"] ```""" from_tf = kwargs.pop("from_tf", False) if from_tf: from transformers import TFVisionEncoderDecoderModel # a workaround to load from tensorflow checkpoint # Using `_tf_model` won't work, because the weight names in the encoder/decoder of `_tf_model` get # extended before saving those components. For example, The name of `_tf_model.encoder.vit` is # `[top model name]/encoder/vit`, but the name of `tf_model.encoder.vit` is `[top model name]/vit`. The # [top model name] is handled (stripped) by the conversion method, and the former case gets extra `encoder`, # which should not occur when we want to save the components alone. 
# There was a (very) ugly potential fix, which wasn't integrated to `transformers`: see # https://github.com/huggingface/transformers/pull/13222/commits/dbb3c9de76eee235791d2064094654637c99f36d#r697304245 # (the change in `src/transformers/modeling_tf_utils.py`) _tf_model = TFVisionEncoderDecoderModel.from_pretrained( pretrained_model_name_or_path, *model_args, **kwargs ) config = _tf_model.config # Using `tf_model` instead encoder = _tf_model.encoder.__class__(_tf_model.config.encoder) decoder = _tf_model.decoder.__class__(_tf_model.config.decoder) # Make sure models are built encoder(encoder.dummy_inputs) decoder(decoder.dummy_inputs) # Get the variable correspondence between `_tf_model` and `encoder` and `decoder` encoder_variables = {} for v in encoder.trainable_variables + encoder.non_trainable_variables: encoder_variables["/".join(v.name.split("/")[1:])] = v decoder_variables = {} for v in decoder.trainable_variables + decoder.non_trainable_variables: decoder_variables["/".join(v.name.split("/")[1:])] = v _encoder_variables = {} for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables: _encoder_variables["/".join(v.name.split("/")[2:])] = v _decoder_variables = {} for v in _tf_model.decoder.trainable_variables + _tf_model.decoder.non_trainable_variables: _decoder_variables["/".join(v.name.split("/")[2:])] = v # assign weight values to `encoder` and `decoder` from `_tf_model` for name, v in encoder_variables.items(): v.assign(_encoder_variables[name]) for name, v in decoder_variables.items(): v.assign(_decoder_variables[name]) tf_model = TFVisionEncoderDecoderModel(encoder=encoder, decoder=decoder) # Deal with `enc_to_dec_proj` if hasattr(_tf_model, "enc_to_dec_proj"): tf_model(tf_model.dummy_inputs) tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel) tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias) with tempfile.TemporaryDirectory() as tmpdirname: encoder_dir = os.path.join(tmpdirname, "encoder") decoder_dir = os.path.join(tmpdirname, "decoder") tf_model.encoder.save_pretrained(encoder_dir) tf_model.decoder.save_pretrained(decoder_dir) if hasattr(tf_model, "enc_to_dec_proj"): enc_to_dec_proj_weight = torch.transpose( torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0 ) enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy()) del _tf_model del tf_model gc.collect() model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True ) # This is only for copying some specific attributes of this particular model. model.config = config if hasattr(model, "enc_to_dec_proj"): model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous() model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous() return model # At the moment fast initialization is not supported for composite models if kwargs.get("_fast_init", False): logger.warning( "Fast initialization is currently not supported for VisionEncoderDecoderModel. " "Falling back to slow initialization..." ) kwargs["_fast_init"] = False return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @classmethod def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: str = None, decoder_pretrained_model_name_or_path: str = None, *model_args, **kwargs, ) -> PreTrainedModel: r""" Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints. 
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the image encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An example is `google/vit-base-patch16-224-in21k`. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the text decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args (remaining positional arguments, *optional*): All remaning positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import VisionEncoderDecoderModel >>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized >>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( ... "google/vit-base-patch16-224-in21k", "bert-base-uncased" ... 
) >>> # saving model after fine-tuning >>> model.save_pretrained("./vit-bert") >>> # load fine-tuned model >>> model = VisionEncoderDecoderModel.from_pretrained("./vit-bert") ```""" kwargs_encoder = { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # remove encoder, decoder kwargs from kwargs for key in kwargs_encoder.keys(): del kwargs["encoder_" + key] for key in kwargs_decoder.keys(): del kwargs["decoder_" + key] # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. encoder = kwargs_encoder.pop("model", None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError( "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_encoder: encoder_config, kwargs_encoder = AutoConfig.from_pretrained( encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True ) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info( f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model " "from a decoder model. Cross-attention and causal mask are disabled." ) encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder["config"] = encoder_config encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) decoder = kwargs_decoder.pop("model", None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError( "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_decoder: decoder_config, kwargs_decoder = AutoConfig.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True ) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
" f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) # instantiate config with corresponding kwargs config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) # make sure input & output embeddings is not tied config.tie_word_embeddings = False return cls(encoder=encoder, decoder=decoder, config=config) @add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoProcessor, VisionEncoderDecoderModel >>> import requests >>> from PIL import Image >>> import torch >>> processor = AutoProcessor.from_pretrained("microsoft/trocr-base-handwritten") >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") >>> # load image from the IAM dataset >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> # training >>> model.config.decoder_start_token_id = processor.tokenizer.cls_token_id >>> model.config.pad_token_id = processor.tokenizer.pad_token_id >>> model.config.vocab_size = model.config.decoder.vocab_size >>> pixel_values = processor(image, return_tensors="pt").pixel_values >>> text = "hello world" >>> labels = processor.tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(pixel_values=pixel_values, labels=labels) >>> loss = outputs.loss >>> # inference (generation) >>> generated_ids = model.generate(pixel_values) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")} kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } if encoder_outputs is None: if pixel_values is None: raise ValueError("You have to specify pixel_values") encoder_outputs = self.encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_encoder, ) elif isinstance(encoder_outputs, tuple): encoder_outputs = BaseModelOutput(*encoder_outputs) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and 
self.decoder.config.cross_attention_hidden_size is None ): encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) # else: encoder_attention_mask = None if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, **kwargs_decoder, ) # Compute loss independent from decoder (as some shift the logits inside them) loss = None if labels is not None: logits = decoder_outputs.logits if return_dict else decoder_outputs[0] loss_fct = CrossEntropyLoss() loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.reshape(-1)) if not return_dict: if loss is not None: return (loss,) + decoder_outputs + encoder_outputs else: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput( loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None input_dict = { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "decoder_input_ids": decoder_inputs["input_ids"], "encoder_outputs": encoder_outputs, "past_key_values": decoder_inputs["past_key_values"], "use_cache": use_cache, } return input_dict def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError( "Resizing the embedding layers via the VisionEncoderDecoderModel directly is not supported.Please use the" " respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))" ) def _reorder_cache(self, past_key_values, beam_idx): # apply decoder cache reordering here return self.decoder._reorder_cache(past_key_values, beam_idx)
transformers/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py", "repo_id": "transformers", "token_count": 14444 }
372
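A minimal end-to-end sketch of the `from_encoder_decoder_pretrained` / `save_pretrained` round trip documented above. It simply stitches together the docstring examples; the checkpoint names come from those examples and are assumed to be reachable on the Hub, and the decoder's cross-attention weights start out randomly initialized.

```python
from transformers import AutoTokenizer, VisionEncoderDecoderModel

# Combine a pretrained ViT encoder with a pretrained BERT decoder; cross-attention
# layers are added to the decoder and randomly initialized (see the warning above).
model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k", "bert-base-uncased"
)

# Generation needs these ids set on the composite config (mirrors the forward() example).
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

# Round-trip through save_pretrained / from_pretrained as in the docstring example.
model.save_pretrained("./vit-bert")
model = VisionEncoderDecoderModel.from_pretrained("./vit-bert")
```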
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ViT.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class ViTImageProcessor(BaseImageProcessor): r""" Constructs a ViT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. 
Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size_dict = get_size_dict(size) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
transformers/src/transformers/models/vit/image_processing_vit.py/0
{ "file_path": "transformers/src/transformers/models/vit/image_processing_vit.py", "repo_id": "transformers", "token_count": 5684 }
373
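A small sketch of the preprocessing pipeline implemented above, using a random NumPy array as a stand-in for a real photo; the 224x224 output size, 1/255 rescale factor, and the `IMAGENET_STANDARD_MEAN`/`IMAGENET_STANDARD_STD` normalization are the class defaults documented in the docstring.

```python
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor()  # do_resize + do_rescale + do_normalize with the documented defaults

# Dummy (height, width, channels) uint8 image standing in for real data.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized, rescaled, normalized, channels-first
```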
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT MSN checkpoints from the original repository: https://github.com/facebookresearch/msn""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append( (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def remove_classification_head_(state_dict): ignore_keys = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(k, None) def remove_projection_head(state_dict): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. ignore_keys = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path): config = ViTMSNConfig() config.num_labels = 1000 repo_id = "datasets/huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if "s16" in checkpoint_url: config.hidden_size = 384 config.intermediate_size = 1536 config.num_attention_heads = 6 elif "l16" in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.hidden_dropout_prob = 0.1 elif "b4" in checkpoint_url: config.patch_size = 4 elif "l7" in checkpoint_url: config.patch_size = 7 config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.hidden_dropout_prob = 0.1 model = ViTMSNModel(config) state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"] image_processor = ViTImageProcessor(size=config.image_size) remove_projection_head(state_dict) rename_keys = create_rename_keys(config, base_model=True) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model=True) model.load_state_dict(state_dict) model.eval() url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) image_processor = ViTImageProcessor( size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD ) inputs = image_processor(images=image, 
return_tensors="pt") # forward pass torch.manual_seed(2) outputs = model(**inputs) last_hidden_state = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]]) elif "b16" in checkpoint_url: expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]]) elif "l16" in checkpoint_url: expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]]) elif "b4" in checkpoint_url: expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]]) else: expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]]) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
transformers/src/transformers/models/vit_msn/convert_msn_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/vit_msn/convert_msn_to_pytorch.py", "repo_id": "transformers", "token_count": 4263 }
374
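The heart of the conversion above is plain state-dict surgery: pop a tensor under its original key and re-insert it under the Hugging Face key. A toy run of that mechanism, using the first two mappings produced by `create_rename_keys` and dummy tensors instead of real MSN weights:

```python
import torch

# Dummy tensors stand in for the real checkpoint values.
state_dict = {
    "module.blocks.0.norm1.weight": torch.zeros(4),
    "module.blocks.0.norm1.bias": torch.zeros(4),
}
rename_keys = [
    ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
    ("module.blocks.0.norm1.bias", "vit.encoder.layer.0.layernorm_before.bias"),
]

def rename_key(dct, old, new):
    dct[new] = dct.pop(old)  # same helper as in the script above

for src, dest in rename_keys:
    rename_key(state_dict, src, dest)

print(sorted(state_dict))
# ['vit.encoder.layer.0.layernorm_before.bias', 'vit.encoder.layer.0.layernorm_before.weight']
```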
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ViViT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/vivit-b-16x2-kinetics400": ( "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class VivitConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VivitModel`]. It is used to instantiate a ViViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViViT [google/vivit-b-16x2-kinetics400](https://huggingface.co/google/vivit-b-16x2-kinetics400) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. num_frames (`int`, *optional*, defaults to 32): The number of frames in each video. tubelet_size (`List[int]`, *optional*, defaults to `[2, 16, 16]`): The size (resolution) of each tubelet. num_channels (`int`, *optional*, defaults to 3): The number of input channels. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_fast"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"gelu_fast"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. 
Example: ```python >>> from transformers import VivitConfig, VivitModel >>> # Initializing a ViViT google/vivit-b-16x2-kinetics400 style configuration >>> configuration = VivitConfig() >>> # Initializing a model (with random weights) from the google/vivit-b-16x2-kinetics400 style configuration >>> model = VivitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vivit" def __init__( self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs, ): self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.num_frames = num_frames self.tubelet_size = tubelet_size self.num_channels = num_channels self.qkv_bias = qkv_bias super().__init__(**kwargs)
transformers/src/transformers/models/vivit/configuration_vivit.py/0
{ "file_path": "transformers/src/transformers/models/vivit/configuration_vivit.py", "repo_id": "transformers", "token_count": 2040 }
375
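Beyond the docstring example, the configuration is a plain container and any of the documented arguments can be overridden at construction time; the values below are illustrative only.

```python
from transformers import VivitConfig

# Override a few of the documented defaults (illustrative values, not a recommended setup).
config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)

print(config.num_frames)    # 16
print(config.tubelet_size)  # [2, 16, 16] (unchanged default)
print(config.hidden_act)    # "gelu_fast" (unchanged default)
```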
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2Bert BERT checkpoint.""" import argparse import torch import torchaudio from fairseq2.data import Collater from fairseq2.data.audio import WaveformToFbankConverter from fairseq2.nn.padding import get_seqs_and_padding_mask from seamless_communication.models.conformer_shaw import load_conformer_shaw_model from transformers import ( SeamlessM4TFeatureExtractor, Wav2Vec2BertConfig, Wav2Vec2BertModel, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) wav2vec_convert_list = [ ("encoder_frontend.model_dim_proj", "feature_projection.projection"), ("encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"), ("encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"), ("encoder.inner.layers", "encoder.layers"), ("encoder.inner_layer_norm", "encoder.layer_norm"), ("encoder.adaptor_layers", "adapter.layers"), ("inner_proj", "intermediate_dense"), ("self_attn.output_proj", "self_attn.linear_out"), ("output_proj", "output_dense"), ("self_attn.k_proj", "self_attn.linear_k"), ("self_attn.v_proj", "self_attn.linear_v"), ("self_attn.q_proj", "self_attn.linear_q"), ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"), ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"), ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"), ("self_attn.sdpa.r_proj", "self_attn.linear_pos"), ("conv.pointwise_conv1", "conv_module.pointwise_conv1"), ("conv.pointwise_conv2", "conv_module.pointwise_conv2"), ("conv.depthwise_conv", "conv_module.depthwise_conv"), ("conv.layer_norm", "conv_module.depthwise_layer_norm"), ("conv_layer_norm", "conv_module.layer_norm"), ("encoder.proj1", "intermediate_ffn.intermediate_dense"), ("encoder.proj2", "intermediate_ffn.output_dense"), ("encoder.layer_norm", "inner_layer_norm"), ("masker.temporal_mask_embed", "masked_spec_embed"), ] keys_to_remove = { "quantizer.entry_proj", "final_proj", "final_target_proj", "quantizer.entries", "quantizer.num_updates", } def param_count(model): return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0]) def _convert_model( original_model, hf_model, convert_list, ): state_dict = original_model.state_dict() for k, v in list(state_dict.items()): new_key = k for old_layer_name, new_layer_name in convert_list: if old_layer_name in new_key: new_key = new_key.replace(old_layer_name, new_layer_name) # must do it by hand if ".layer_norm" in new_key and new_key.split(".layer_norm")[0][-1].isnumeric(): new_key = new_key.replace("layer_norm", "final_layer_norm") add_key = True for key in keys_to_remove: if key in new_key: state_dict.pop(k) add_key = False break if add_key: state_dict[new_key] = state_dict.pop(k) extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys()) extra_keys = set({k for k in extra_keys if "num_updates" not in k}) # filter unecessary param missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys()) if len(extra_keys) != 0: raise 
ValueError(f"extra keys found: {extra_keys}") if len(missing_keys) != 0: raise ValueError(f"missing keys: {missing_keys}") hf_model.load_state_dict(state_dict, strict=True) n_params = param_count(hf_model) logger.info(f"model loaded: {round(n_params/1e6,1)}M params") hf_model.eval() del state_dict return hf_model @torch.no_grad() def convert_wav2vec2_bert_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ): """ Copy/paste/tweak model's weights to transformers design. """ if config_path is not None: config = Wav2Vec2BertConfig.from_pretrained(config_path, hidden_act="swish") else: config = Wav2Vec2BertConfig(apply_spec_augment=False) hf_wav2vec = Wav2Vec2BertModel(config) model = load_conformer_shaw_model(checkpoint_path, dtype=torch.float32) model.eval() hf_wav2vec = _convert_model(model, hf_wav2vec, wav2vec_convert_list) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) if repo_id: hf_wav2vec.push_to_hub(repo_id, create_pr=True) # save feature extractor fe = SeamlessM4TFeatureExtractor(padding_value=1) fe._set_processor_class("Wav2Vec2BertProcessor") fe.save_pretrained(pytorch_dump_folder_path) if repo_id: fe.push_to_hub(repo_id, create_pr=True) if args.audio_path: waveform, sample_rate = torchaudio.load(args.audio_path) waveform = torchaudio.functional.resample(waveform, sample_rate, fe.sampling_rate) fbank_converter = WaveformToFbankConverter( num_mel_bins=80, waveform_scale=2**15, channel_last=True, standardize=True, dtype=torch.float32, ) collater = Collater(pad_value=1) decoded_audio = {"waveform": waveform.T, "sample_rate": fe.sampling_rate, "format": -1} src = collater(fbank_converter(decoded_audio))["fbank"] seqs, padding_mask = get_seqs_and_padding_mask(src) with torch.inference_mode(): seqs, padding_mask = model.encoder_frontend(seqs, padding_mask) original_output, padding_mask = model.encoder(seqs, padding_mask) hf_wav2vec.eval() inputs = fe(waveform, return_tensors="pt", padding=True) with torch.no_grad(): outputs = hf_wav2vec(**inputs) torch.testing.assert_close(original_output, outputs.last_hidden_state, atol=5e-3, rtol=5e-3) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.", ) parser.add_argument( "--checkpoint_path", default="conformer_shaw", type=str, help="Path to seamless communication checkpoint" ) parser.add_argument( "--config_path", default=None, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--repo_id", default=None, type=str, help="Push to this repo id if precised.") parser.add_argument( "--audio_path", default=None, type=str, help="If specified, check that the original model and the converted model produce the same outputs.", ) args = parser.parse_args() convert_wav2vec2_bert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.repo_id )
transformers/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py", "repo_id": "transformers", "token_count": 3156 }
376
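`_convert_model` above maps fairseq2-style parameter names onto Transformers names by cumulative substring replacement over `wav2vec_convert_list`. A standalone illustration of that loop with one plausible key (the key itself is made up for the example; the two mappings are taken from the list above):

```python
convert_list = [
    ("encoder.inner.layers", "encoder.layers"),
    ("self_attn.q_proj", "self_attn.linear_q"),
]

# Hypothetical fairseq2-style parameter name, used only to show the replacement.
new_key = "encoder.inner.layers.3.self_attn.q_proj.weight"
for old_layer_name, new_layer_name in convert_list:
    if old_layer_name in new_key:
        new_key = new_key.replace(old_layer_name, new_layer_name)

print(new_key)  # encoder.layers.3.self_attn.linear_q.weight
```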
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"], "feature_extraction_whisper": ["WhisperFeatureExtractor"], "processing_whisper": ["WhisperProcessor"], "tokenization_whisper": ["WhisperTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_whisper"] = [ "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "WhisperForCausalLM", "WhisperForConditionalGeneration", "WhisperModel", "WhisperPreTrainedModel", "WhisperForAudioClassification", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_whisper"] = [ "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_whisper"] = [ "FlaxWhisperForConditionalGeneration", "FlaxWhisperModel", "FlaxWhisperPreTrainedModel", "FlaxWhisperForAudioClassification", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForCausalLM, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: 
import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/whisper/__init__.py/0
{ "file_path": "transformers/src/transformers/models/whisper/__init__.py", "repo_id": "transformers", "token_count": 1768 }
377
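Because of the `_LazyModule` registration at the bottom of the file, importing the subpackage stays cheap and each class listed in `_import_structure` is only resolved when it is first accessed. A short usage sketch, assuming the relevant backends are installed (e.g. `torch` for the last two lines):

```python
# Backend-free names resolve without importing torch/tf/flax.
from transformers.models.whisper import WhisperConfig, WhisperFeatureExtractor

config = WhisperConfig()
extractor = WhisperFeatureExtractor()

# Torch-backed classes are only materialized on access (assumes torch is installed).
from transformers.models.whisper import WhisperForConditionalGeneration

model = WhisperForConditionalGeneration(config)  # randomly initialized model from the default config
```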
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for XCLIP """ import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class XCLIPProcessor(ProcessorMixin): r""" Constructs an X-CLIP processor which wraps a VideoMAE image processor and a CLIP tokenizer into a single processor. [`XCLIPProcessor`] offers all the functionalities of [`VideoMAEImageProcessor`] and [`CLIPTokenizerFast`]. See the [`~XCLIPProcessor.__call__`] and [`~XCLIPProcessor.decode`] for more information. Args: image_processor ([`VideoMAEImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "VideoMAEImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, text=None, videos=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to VideoMAEImageProcessor's [`~VideoMAEImageProcessor.__call__`] if `videos` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). videos (`List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[PIL.Image.Image]]`, `List[List[np.ndarrray]]`,: `List[List[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors, each frame should be of shape (H, W, C), where H and W are frame height and width, and C is a number of channels. 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`. """ if text is None and videos is None: raise ValueError("You have to specify either text or videos. Both cannot be none.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if videos is not None: image_features = self.image_processor(videos, return_tensors=return_tensors, **kwargs) if text is not None and videos is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): return ["input_ids", "attention_mask", "position_ids", "pixel_values"] @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
transformers/src/transformers/models/x_clip/processing_x_clip.py/0
{ "file_path": "transformers/src/transformers/models/x_clip/processing_x_clip.py", "repo_id": "transformers", "token_count": 2662 }
378
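A minimal sketch of calling the processor above with one text prompt and one dummy 8-frame video; the random frames stand in for real video data, and the `microsoft/xclip-base-patch32` checkpoint is assumed to be reachable on the Hub.

```python
import numpy as np
from transformers import XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")

# One video as a list of 8 (height, width, channels) uint8 frames.
video = list(np.random.randint(0, 256, size=(8, 224, 224, 3), dtype=np.uint8))

inputs = processor(text="a person playing guitar", videos=video, return_tensors="pt")
print(inputs["input_ids"].shape)     # (1, sequence_length)
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224)
```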
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XLM-ProphetNet model configuration""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json" ), } class XLMProphetNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XLMProphetNetModel`]. It is used to instantiate a XLMProphetNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the XLMProphetNet [microsoft/xprophetnet-large-wiki100-cased](https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`XLMProphetNetModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. num_encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the `intermediate` (often named feed-forward) layer in decoder. num_decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. num_decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_cross_attention (`bool`, *optional*, defaults to `True`): Whether cross-attention layers should be added to the model. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. pad_token_id (`int`, *optional*, defaults to 1) Padding token id. bos_token_id (`int`, *optional*, defaults to 0) Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. ngram (`int`, *optional*, defaults to 2) Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first token. num_buckets (`int`, *optional*, defaults to 32) The number of buckets to use for each attention layer. This is for relative position calculation. See the [T5 paper](see https://arxiv.org/abs/1910.10683) for more details. relative_max_distance (`int`, *optional*, defaults to 128) Relative distances greater than this number will be put into the last same bucket. This is for relative position calculation. See the [T5 paper](see https://arxiv.org/abs/1910.10683) for more details. disable_ngram_loss (`bool`, *optional*, defaults to `False`): Whether be trained predicting only the next first token. eps (`float`, *optional*, defaults to 0.0): Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "xlm-prophetnet" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_ffn_dim = encoder_ffn_dim self.num_encoder_layers = num_encoder_layers self.num_encoder_attention_heads = num_encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.num_decoder_layers = num_decoder_layers self.num_decoder_attention_heads = num_decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function # parameters for xlmprophetnet self.ngram = ngram self.num_buckets = num_buckets 
self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.eps = eps # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, ) @property def num_hidden_layers(self) -> int: return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." )
transformers/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py/0
{ "file_path": "transformers/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py", "repo_id": "transformers", "token_count": 3551 }
379
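A brief usage sketch for the `XLMProphetNetConfig` class above, assuming the XLM-ProphetNet family is available in your installed version of `transformers`. The layer counts are illustrative; the point is that `num_hidden_layers` is a derived property (encoder layers plus decoder layers) and that its setter deliberately raises, as defined at the end of the file.

```python
from transformers import XLMProphetNetConfig

# Illustrative layer counts; all other arguments keep their documented defaults.
config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=6)

# `num_hidden_layers` is computed, not stored: encoder layers + decoder layers.
print(config.num_hidden_layers)  # 18

# The setter is intentionally disabled; change the encoder/decoder counts instead.
try:
    config.num_hidden_layers = 24
except NotImplementedError as err:
    print(err)
```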
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BERT checkpoint.""" import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging GLUE_TASKS_NUM_LABELS = { "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None ): # Initialise PyTorch model config = XLNetConfig.from_json_file(bert_config_file) finetuning_task = finetuning_task.lower() if finetuning_task is not None else "" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}") config.finetuning_task = finetuning_task config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task] model = XLNetForSequenceClassification(config) elif "squad" in finetuning_task: config.finetuning_task = finetuning_task model = XLNetForQuestionAnswering(config) else: model = XLNetLMHeadModel(config) # Load weights from tf checkpoint load_tf_weights_in_xlnet(model, config, tf_checkpoint_path) # Save pytorch-model pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME) print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) args = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
transformers/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1468 }
380
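As a rough illustration of how the conversion script above is meant to be driven, the sketch below calls `convert_xlnet_checkpoint_to_pytorch` directly instead of going through the CLI. All three paths are hypothetical placeholders and must point at a real TensorFlow XLNet checkpoint, its config JSON, and a writable output folder.

```python
from transformers.models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
    convert_xlnet_checkpoint_to_pytorch,
)

# Hypothetical paths -- substitute your own checkpoint, config, and output folder.
tf_checkpoint = "/data/xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt"
config_json = "/data/xlnet_cased_L-12_H-768_A-12/xlnet_config.json"
output_dir = "/data/xlnet_pytorch"

# Passing a GLUE task name builds an XLNetForSequenceClassification head with the
# matching number of labels; omit `finetuning_task` to export a plain XLNetLMHeadModel.
convert_xlnet_checkpoint_to_pytorch(tf_checkpoint, config_json, output_dir, finetuning_task="sst-2")
```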
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ YOSO model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP = { "uw-madison/yoso-4096": "https://huggingface.co/uw-madison/yoso-4096/resolve/main/config.json", # See all YOSO models at https://huggingface.co/models?filter=yoso } class YosoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`YosoModel`]. It is used to instantiate an YOSO model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the YOSO [uw-madison/yoso-4096](https://huggingface.co/uw-madison/yoso-4096) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the YOSO model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`YosoModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`YosoModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. 
Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. use_expectation (`bool`, *optional*, defaults to `True`): Whether or not to use YOSO Expectation. Overrides any effect of num_hash. hash_code_len (`int`, *optional*, defaults to 9): The length of hashes generated by the hash functions. num_hash (`int`, *optional*, defaults to 64): Number of hash functions used in [`YosoSelfAttention`]. conv_window (`int`, *optional*): Kernel size of depth-wise convolution. use_fast_hash (`bool`, *optional*, defaults to `False`): Whether or not to use custom cuda kernels which perform fast random projection via hadamard transform. lsh_backward (`bool`, *optional*, defaults to `True`): Whether or not to perform backpropagation using Locality Sensitive Hashing. Example: ```python >>> from transformers import YosoConfig, YosoModel >>> # Initializing a YOSO uw-madison/yoso-4096 style configuration >>> configuration = YosoConfig() >>> # Initializing a model (with random weights) from the uw-madison/yoso-4096 style configuration >>> model = YosoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "yoso" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_expectation=True, hash_code_len=9, num_hash=64, conv_window=None, use_fast_hash=True, lsh_backward=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_expectation = use_expectation self.hash_code_len = hash_code_len self.num_hash = num_hash self.conv_window = conv_window self.use_fast_hash = use_fast_hash self.lsh_backward = lsh_backward
transformers/src/transformers/models/yoso/configuration_yoso.py/0
{ "file_path": "transformers/src/transformers/models/yoso/configuration_yoso.py", "repo_id": "transformers", "token_count": 2641 }
381
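A small, illustrative variation on the `YosoConfig` example already shown in the docstring above: it overrides the LSH-specific arguments. The particular values are arbitrary and only meant to show where these knobs live; whether they are sensible for a given training run is outside the scope of the configuration class.

```python
from transformers import YosoConfig

# Arbitrary illustrative values for the hashing-related arguments.
config = YosoConfig(
    use_expectation=False,  # per the docstring, num_hash only takes effect when the expectation trick is off
    num_hash=128,
    hash_code_len=9,
)

print(config.use_expectation, config.num_hash, config.hash_code_len)
print(config.max_position_embeddings)  # 4096 by default in this long-sequence architecture
```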
import uuid from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import Pipeline, build_pipeline_init_args if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch logger = logging.get_logger(__name__) class Conversation: """ Utility class containing a conversation and its history. This class is meant to be used as an input to the [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user inputs and generated model responses. Arguments: messages (Union[str, List[Dict[str, str]]], *optional*): The initial messages to start the conversation, either a string, or a list of dicts containing "role" and "content" keys. If a string is passed, it is interpreted as a single message with the "user" role. conversation_id (`uuid.UUID`, *optional*): Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the conversation. Usage: ```python conversation = Conversation("Going to the movies tonight - any suggestions?") conversation.add_message({"role": "assistant", "content": "The Big lebowski."}) conversation.add_message({"role": "user", "content": "Is it good?"}) ```""" def __init__( self, messages: Union[str, List[Dict[str, str]]] = None, conversation_id: uuid.UUID = None, **deprecated_kwargs ): if not conversation_id: conversation_id = uuid.uuid4() if messages is None: text = deprecated_kwargs.pop("text", None) if text is not None: messages = [{"role": "user", "content": text}] else: messages = [] elif isinstance(messages, str): messages = [{"role": "user", "content": messages}] # This block deals with the legacy args - new code should just totally # avoid past_user_inputs and generated_responses self._num_processed_user_inputs = 0 generated_responses = deprecated_kwargs.pop("generated_responses", None) past_user_inputs = deprecated_kwargs.pop("past_user_inputs", None) if generated_responses is not None and past_user_inputs is None: raise ValueError("generated_responses cannot be passed without past_user_inputs!") if past_user_inputs is not None: legacy_messages = [] if generated_responses is None: generated_responses = [] # We structure it this way instead of using zip() because the lengths may differ by 1 for i in range(max([len(past_user_inputs), len(generated_responses)])): if i < len(past_user_inputs): legacy_messages.append({"role": "user", "content": past_user_inputs[i]}) if i < len(generated_responses): legacy_messages.append({"role": "assistant", "content": generated_responses[i]}) messages = legacy_messages + messages self.uuid = conversation_id self.messages = messages def __eq__(self, other): if not isinstance(other, Conversation): return False return self.uuid == other.uuid or self.messages == other.messages def add_message(self, message: Dict[str, str]): if not set(message.keys()) == {"role", "content"}: raise ValueError("Message should contain only 'role' and 'content' keys!") if message["role"] not in ("user", "assistant", "system"): raise ValueError("Only 'user', 'assistant' and 'system' roles are supported for now!") self.messages.append(message) def add_user_input(self, text: str, overwrite: bool = False): """ Add a user input to the conversation for the next round. This is a legacy method that assumes that inputs must alternate user/assistant/user/assistant, and so will not add multiple user messages in succession. We recommend just using `add_message` with role "user" instead. 
""" if len(self) > 0 and self[-1]["role"] == "user": if overwrite: logger.warning( f'User input added while unprocessed input was existing: "{self[-1]["content"]}" was overwritten ' f'with: "{text}".' ) self[-1]["content"] = text else: logger.warning( f'User input added while unprocessed input was existing: "{self[-1]["content"]}" new input ' f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: self.messages.append({"role": "user", "content": text}) def append_response(self, response: str): """ This is a legacy method. We recommend just using `add_message` with an appropriate role instead. """ self.messages.append({"role": "assistant", "content": response}) def mark_processed(self): """ This is a legacy method, as the Conversation no longer distinguishes between processed and unprocessed user input. We set a counter here to keep behaviour mostly backward-compatible, but in general you should just read the messages directly when writing new code. """ self._num_processed_user_inputs = len(self._user_messages) def __iter__(self): for message in self.messages: yield message def __getitem__(self, item): return self.messages[item] def __setitem__(self, key, value): self.messages[key] = value def __len__(self): return len(self.messages) def __repr__(self): """ Generates a string representation of the conversation. Returns: `str`: Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user: Going to the movies tonight - any suggestions? bot: The Big Lebowski """ output = f"Conversation id: {self.uuid}\n" for message in self.messages: output += f"{message['role']}: {message['content']}\n" return output def iter_texts(self): # This is a legacy method for backwards compatibility. It is recommended to just directly access # conversation.messages instead. for message in self.messages: yield message["role"] == "user", message["content"] @property def _user_messages(self): # This is a legacy property for backwards compatibility. It is recommended to just directly access # conversation.messages instead. return [message["content"] for message in self.messages if message["role"] == "user"] @property def past_user_inputs(self): # This is a legacy property for backwards compatibility. It is recommended to just directly access # conversation.messages instead. The modern class does not care about which messages are "processed" # or not. if not self._user_messages: return [] # In the past, the most recent user message had to be mark_processed() before being included # in past_user_messages. The class essentially had a single-message buffer, representing messages that # had not yet been replied to. This is no longer the case, but we mimic the behaviour in this property # for backward compatibility. if self.messages[-1]["role"] != "user" or self._num_processed_user_inputs == len(self._user_messages): return self._user_messages return self._user_messages[:-1] @property def generated_responses(self): # This is a legacy property for backwards compatibility. It is recommended to just directly access # conversation.messages instead. return [message["content"] for message in self.messages if message["role"] == "assistant"] @property def new_user_input(self): # This is a legacy property for backwards compatibility. It is recommended to just directly access # conversation.messages instead. 
return self._user_messages[-1] @add_end_docstrings( build_pipeline_init_args(has_tokenizer=True), r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response.""", ) class ConversationalPipeline(Pipeline): """ Multi-turn conversational pipeline. Example: ```python >>> from transformers import pipeline, Conversation # Any model with a chat template can be used in a ConversationalPipeline. >>> chatbot = pipeline(model="facebook/blenderbot-400M-distill") >>> # Conversation objects initialized with a string will treat it as a user message >>> conversation = Conversation("I'm looking for a movie - what's your favourite one?") >>> conversation = chatbot(conversation) >>> conversation.messages[-1]["content"] "I don't really have a favorite movie, but I do like action movies. What about you?" >>> conversation.add_message({"role": "user", "content": "That's interesting, why do you like action movies?"}) >>> conversation = chatbot(conversation) >>> conversation.messages[-1]["content"] " I think it's just because they're so fast-paced and action-fantastic." ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"conversational"`. This pipeline can be used with any model that has a [chat template](https://huggingface.co/docs/transformers/chat_templating) set. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tokenizer.pad_token_id is None: self.tokenizer.pad_token = self.tokenizer.eos_token def _sanitize_parameters( self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs ): preprocess_params = {} forward_params = {} postprocess_params = {} if min_length_for_response is not None: preprocess_params["min_length_for_response"] = min_length_for_response if minimum_tokens is not None: forward_params["minimum_tokens"] = minimum_tokens if "max_length" in generate_kwargs: forward_params["max_length"] = generate_kwargs["max_length"] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(generate_kwargs) return preprocess_params, forward_params, postprocess_params def __call__(self, conversations: Union[List[Dict], Conversation, List[Conversation]], num_workers=0, **kwargs): r""" Generate responses for the conversation(s) given as inputs. Args: conversations (a [`Conversation`] or a list of [`Conversation`]): Conversation to generate responses for. Inputs can also be passed as a list of dictionaries with `role` and `content` keys - in this case, they will be converted to `Conversation` objects automatically. Multiple conversations in either format may be passed as a list. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). 
Returns: [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those containing a new user input. """ # XXX: num_workers==0 is required to be backward compatible # Otherwise the threads will require a Conversation copy. # This will definitely hinder performance on GPU, but has to be opted # in because of this BC change. if isinstance(conversations, list) and isinstance(conversations[0], dict): conversations = Conversation(conversations) elif isinstance(conversations, list) and isinstance(conversations[0], list): conversations = [Conversation(conv) for conv in conversations] outputs = super().__call__(conversations, num_workers=num_workers, **kwargs) if isinstance(outputs, list) and len(outputs) == 1: return outputs[0] return outputs def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]: input_ids = self.tokenizer.apply_chat_template(conversation, add_generation_prompt=True) if self.framework == "pt": input_ids = torch.LongTensor([input_ids]) elif self.framework == "tf": input_ids = tf.constant([input_ids]) return {"input_ids": input_ids, "conversation": conversation} def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs): n = model_inputs["input_ids"].shape[1] conversation = model_inputs.pop("conversation") if "max_length" not in generate_kwargs and "max_new_tokens" not in generate_kwargs: generate_kwargs["max_new_tokens"] = 256 output_ids = self.model.generate(**model_inputs, **generate_kwargs) if self.model.config.is_encoder_decoder: start_position = 1 else: start_position = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def postprocess(self, model_outputs, clean_up_tokenization_spaces=True): output_ids = model_outputs["output_ids"] answer = self.tokenizer.decode( output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) conversation = model_outputs["conversation"] conversation.add_message({"role": "assistant", "content": answer}) return conversation
transformers/src/transformers/pipelines/conversational.py/0
{ "file_path": "transformers/src/transformers/pipelines/conversational.py", "repo_id": "transformers", "token_count": 5596 }
382
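To complement the docstring example in `ConversationalPipeline` above, here is a sketch of the list-of-dicts input path handled in `__call__`. The checkpoint is the one used in the class docstring, and the generated replies will differ from run to run.

```python
from transformers import pipeline

chatbot = pipeline("conversational", model="facebook/blenderbot-400M-distill")

# `__call__` accepts a plain list of {"role", "content"} dicts and wraps it
# into a `Conversation` object automatically.
conversation = chatbot([{"role": "user", "content": "Any tips for a first marathon?"}])
print(conversation.messages[-1]["content"])

# Continue the same conversation with another user turn.
conversation.add_message({"role": "user", "content": "How many weeks should I train for?"})
conversation = chatbot(conversation)
print(conversation.messages[-1]["content"])
```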
import enum import warnings from ..utils import add_end_docstrings, is_tf_available, is_torch_available from .base import Pipeline, build_pipeline_init_args if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES class ReturnType(enum.Enum): TENSORS = 0 NEW_TEXT = 1 FULL_TEXT = 2 @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class TextGenerationPipeline(Pipeline): """ Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a specified text prompt. Example: ```python >>> from transformers import pipeline >>> generator = pipeline(model="gpt2") >>> generator("I can't believe you did such a ", do_sample=False) [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}] >>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions. >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False) ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about text generation parameters in [Text generation strategies](../generation_strategies) and [Text generation](text_generation). This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"text-generation"`. The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-generation). """ # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e XL_PREFIX = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. 
prefix = None if self.model.config.prefix is not None: prefix = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. prefix = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params) self._preprocess_params = {**self._preprocess_params, **preprocess_params} self._forward_params = {**self._forward_params, **forward_params} def _sanitize_parameters( self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, add_special_tokens=False, truncation=None, padding=False, max_length=None, **generate_kwargs, ): preprocess_params = { "add_special_tokens": add_special_tokens, "truncation": truncation, "padding": padding, "max_length": max_length, } if max_length is not None: generate_kwargs["max_length"] = max_length if prefix is not None: preprocess_params["prefix"] = prefix if prefix: prefix_inputs = self.tokenizer( prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework ) generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" " [None, 'hole']" ) preprocess_params["handle_long_generation"] = handle_long_generation preprocess_params.update(generate_kwargs) forward_params = generate_kwargs postprocess_params = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_full_text`") if return_tensors is not None: raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.TENSORS if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) if len(stop_sequence_ids) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) generate_kwargs["eos_token_id"] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments def _parse_and_tokenize(self, *args, **kwargs): """ Parse arguments and tokenize """ # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"add_space_before_punct_symbol": True}) return super()._parse_and_tokenize(*args, **kwargs) def __call__(self, text_inputs, **kwargs): """ Complete the prompt(s) given as inputs. 
        Args:
            args (`str` or `List[str]`):
                One or several prompts (or one list of prompts) to complete.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to return the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to return the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False` only added text is returned, otherwise the full text is returned. Only meaningful if
                *return_text* is set to True.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            prefix (`str`, *optional*):
                Prefix added to prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (ones that exceed in one form or the other
                the model maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
                strategies to work around that problem depending on your use case.

                - `None`: default strategy where nothing in particular happens
                - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
                  truncate a lot of the prompt and not suitable when generation exceeds the model capacity)

            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Returns one of the following dictionaries (cannot return a combination
            of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
""" return super().__call__(text_inputs, **kwargs) def preprocess( self, prompt_text, prefix="", handle_long_generation=None, add_special_tokens=False, truncation=None, padding=False, max_length=None, **generate_kwargs, ): inputs = self.tokenizer( prefix + prompt_text, return_tensors=self.framework, truncation=truncation, padding=padding, max_length=max_length, add_special_tokens=add_special_tokens, ) inputs["prompt_text"] = prompt_text if handle_long_generation == "hole": cur_len = inputs["input_ids"].shape[-1] if "max_new_tokens" in generate_kwargs: new_tokens = generate_kwargs["max_new_tokens"] else: new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError("We cannot infer how many new tokens are expected") if cur_len + new_tokens > self.tokenizer.model_max_length: keep_length = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( "We cannot use `hole` to handle this generation the number of desired tokens exceeds the" " models max length" ) inputs["input_ids"] = inputs["input_ids"][:, -keep_length:] if "attention_mask" in inputs: inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:] return inputs def _forward(self, model_inputs, **generate_kwargs): input_ids = model_inputs["input_ids"] attention_mask = model_inputs.get("attention_mask", None) # Allow empty prompts if input_ids.shape[1] == 0: input_ids = None attention_mask = None in_b = 1 else: in_b = input_ids.shape[0] prompt_text = model_inputs.pop("prompt_text") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. prefix_length = generate_kwargs.pop("prefix_length", 0) if prefix_length > 0: has_max_new_tokens = "max_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].max_new_tokens is not None ) if not has_max_new_tokens: generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length has_min_new_tokens = "min_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs) out_b = generated_sequence.shape[0] if self.framework == "pt": generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:]) elif self.framework == "tf": generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True): generated_sequence = model_outputs["generated_sequence"][0] input_ids = model_outputs["input_ids"] prompt_text = model_outputs["prompt_text"] generated_sequence = generated_sequence.numpy().tolist() records = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: record = {"generated_token_ids": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text text = self.tokenizer.decode( sequence, 
skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: prompt_length = 0 else: prompt_length = len( self.tokenizer.decode( input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) ) all_text = text[prompt_length:] if return_type == ReturnType.FULL_TEXT: all_text = prompt_text + all_text record = {"generated_text": all_text} records.append(record) return records
transformers/src/transformers/pipelines/text_generation.py/0
{ "file_path": "transformers/src/transformers/pipelines/text_generation.py", "repo_id": "transformers", "token_count": 6925 }
383
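A short sketch of the generation knobs documented in `TextGenerationPipeline.__call__` above, including the `"hole"` strategy for prompts that exceed the model window. The `gpt2` checkpoint is the one used in the class docstring; the generated text is not reproduced here and will vary.

```python
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

# Return only the newly generated continuation, capped at 20 fresh tokens.
outputs = generator(
    "The quickest way to learn a new programming language is",
    max_new_tokens=20,
    do_sample=False,
    return_full_text=False,
)
print(outputs[0]["generated_text"])

# For prompts longer than the model window, `handle_long_generation="hole"`
# keeps only the most recent tokens so that generation still fits.
long_prompt = "All work and no play makes Jack a dull boy. " * 500
outputs = generator(long_prompt, handle_long_generation="hole", max_new_tokens=20, return_full_text=False)
print(outputs[0]["generated_text"])
```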
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from typing import TYPE_CHECKING, Any, Dict, List, Union from packaging import version from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging from .quantizers_utils import get_module_from_name if is_torch_available(): import torch from ..pytorch_utils import Conv1D logger = logging.get_logger(__name__) class Bnb8BitHfQuantizer(HfQuantizer): """ 8-bit quantization from bitsandbytes quantization method: before loading: converts transformer layers into Linear8bitLt during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear8bitLt into 8bit at fitst .cuda() call saving: from state dict, as usual; saves weights and 'SCB' component loading: need to locate SCB component and pass to the Linear8bitLt object """ use_keep_in_fp32_modules = True requires_parameters_quantization = True requires_calibration = False required_packages = ["bitsandbytes", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not (is_accelerate_available() and is_bitsandbytes_available()): raise ImportError( "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` " "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`" ) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) if not torch.cuda.is_available(): raise RuntimeError("No GPU found. A GPU is needed for quantization.") device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom `device_map` to `from_pretrained`. Check https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu for more details. 
""" ) if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 8bit inference and training" " make sure you have the latest version of `bitsandbytes` installed" ) def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype def update_device_map(self, device_map): if device_map is None: device_map = {"": torch.cuda.current_device()} logger.info( "The device_map was not initialized. " "Setting device_map to {'':torch.cuda.current_device()}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if target_dtype != torch.int8: logger.info("target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization") return torch.int8 def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any] ): import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters[tensor_name], bnb.nn.Int8Params): if self.pre_quantized: if param_name.replace("weight", "SCB") not in state_dict.keys(): raise ValueError("Missing quantization component `SCB`") if param_value.dtype != torch.int8: raise ValueError( f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`." 
) return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: List[str], ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() needs aux items from state dicts, if found - removes them from unexpected_keys """ import bitsandbytes as bnb fp16_statistics_key = param_name.replace("weight", "SCB") fp16_statistics = state_dict.get(fp16_statistics_key, None) module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params): raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") new_value = param_value.to("cpu") if self.pre_quantized and not self.is_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls, Conv1D): if fp16_statistics is None: new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: setattr(module.weight, "SCB", fp16_statistics.to(target_device)) unexpected_keys.remove(fp16_statistics_key) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model._is_quantized_training_enabled = self.is_trainable model.is_loaded_in_8bit = True model.is_8bit_serializable = self.is_serializable return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if self.quantization_config.llm_int8_skip_modules is None: self.modules_to_not_convert = get_keys_to_not_convert(model) else: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " 
"`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu) model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here model.config.quantization_config = self.quantization_config @property def is_serializable(self): _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse( "0.37.2" ) if not _bnb_supports_8bit_serialization: logger.warning( "You are calling `save_pretrained` to a 8-bit converted model, but your `bitsandbytes` version doesn't support it. " "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or" " unexpected behaviours." ) return False return True @property def is_trainable(self) -> bool: return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0")
transformers/src/transformers/quantizers/quantizer_bnb_8bit.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_bnb_8bit.py", "repo_id": "transformers", "token_count": 5225 }
384
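The 8-bit quantizer above is normally exercised indirectly, by passing a `BitsAndBytesConfig` to `from_pretrained`. The sketch below assumes a CUDA GPU plus the `bitsandbytes` and `accelerate` packages; the checkpoint name is only an example, and any causal LM on the Hub could stand in.

```python
import torch

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "facebook/opt-1.3b"  # illustrative checkpoint

quantization_config = BitsAndBytesConfig(
    load_in_8bit=True,
    # Mirrors `llm_int8_skip_modules` handled by the quantizer: these modules stay
    # in their original dtype instead of being converted to Linear8bitLt.
    llm_int8_skip_modules=["lm_head"],
)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",          # dispatch the quantized weights onto the available GPU(s)
    torch_dtype=torch.float16,  # dtype used for the non-quantized layers
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

inputs = tokenizer("8-bit loading keeps memory usage low because", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))
```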
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import importlib import inspect import io import json import os import tempfile from typing import Any, Dict, List, Optional, Union from huggingface_hub import create_repo, hf_hub_download, metadata_update, upload_folder from huggingface_hub.utils import RepositoryNotFoundError, build_hf_headers, get_session from ..dynamic_module_utils import custom_object_save, get_class_from_dynamic_module, get_imports from ..image_utils import is_pil_image from ..models.auto import AutoProcessor from ..utils import ( CONFIG_NAME, cached_file, is_accelerate_available, is_torch_available, is_vision_available, logging, ) from .agent_types import handle_agent_inputs, handle_agent_outputs logger = logging.get_logger(__name__) if is_torch_available(): import torch if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import send_to_device TOOL_CONFIG_FILE = "tool_config.json" def get_repo_type(repo_id, repo_type=None, **hub_kwargs): if repo_type is not None: return repo_type try: hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space", **hub_kwargs) return "space" except RepositoryNotFoundError: try: hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="model", **hub_kwargs) return "model" except RepositoryNotFoundError: raise EnvironmentError(f"`{repo_id}` does not seem to be a valid repo identifier on the Hub.") except Exception: return "model" except Exception: return "space" # docstyle-ignore APP_FILE_TEMPLATE = """from transformers import launch_gradio_demo from {module_name} import {class_name} launch_gradio_demo({class_name}) """ class Tool: """ A base class for the functions used by the agent. Subclass this and implement the `__call__` method as well as the following class attributes: - **description** (`str`) -- A short description of what your tool does, the inputs it expects and the output(s) it will return. For instance 'This is a tool that downloads a file from a `url`. It takes the `url` as input, and returns the text contained in the file'. - **name** (`str`) -- A performative name that will be used for your tool in the prompt to the agent. For instance `"text-classifier"` or `"image_generator"`. - **inputs** (`List[str]`) -- The list of modalities expected for the inputs (in the same order as in the call). Modalitiies should be `"text"`, `"image"` or `"audio"`. This is only used by `launch_gradio_demo` or to make a nice space from your tool. - **outputs** (`List[str]`) -- The list of modalities returned but the tool (in the same order as the return of the call method). Modalitiies should be `"text"`, `"image"` or `"audio"`. This is only used by `launch_gradio_demo` or to make a nice space from your tool. You can also override the method [`~Tool.setup`] if your tool as an expensive operation to perform before being usable (such as loading a model). 
[`~Tool.setup`] will be called the first time you use your tool, but not at instantiation. """ description: str = "This is a tool that ..." name: str = "" inputs: List[str] outputs: List[str] def __init__(self, *args, **kwargs): self.is_initialized = False def __call__(self, *args, **kwargs): return NotImplemented("Write this method in your subclass of `Tool`.") def setup(self): """ Overwrite this method here for any operation that is expensive and needs to be executed before you start using your tool. Such as loading a big model. """ self.is_initialized = True def save(self, output_dir): """ Saves the relevant code files for your tool so it can be pushed to the Hub. This will copy the code of your tool in `output_dir` as well as autogenerate: - a config file named `tool_config.json` - an `app.py` file so that your tool can be converted to a space - a `requirements.txt` containing the names of the module used by your tool (as detected when inspecting its code) You should only use this method to save tools that are defined in a separate module (not `__main__`). Args: output_dir (`str`): The folder in which you want to save your tool. """ os.makedirs(output_dir, exist_ok=True) # Save module file if self.__module__ == "__main__": raise ValueError( f"We can't save the code defining {self} in {output_dir} as it's been defined in __main__. You " "have to put this code in a separate module so we can include it in the saved folder." ) module_files = custom_object_save(self, output_dir) module_name = self.__class__.__module__ last_module = module_name.split(".")[-1] full_name = f"{last_module}.{self.__class__.__name__}" # Save config file config_file = os.path.join(output_dir, "tool_config.json") if os.path.isfile(config_file): with open(config_file, "r", encoding="utf-8") as f: tool_config = json.load(f) else: tool_config = {} tool_config = {"tool_class": full_name, "description": self.description, "name": self.name} with open(config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tool_config, indent=2, sort_keys=True) + "\n") # Save app file app_file = os.path.join(output_dir, "app.py") with open(app_file, "w", encoding="utf-8") as f: f.write(APP_FILE_TEMPLATE.format(module_name=last_module, class_name=self.__class__.__name__)) # Save requirements file requirements_file = os.path.join(output_dir, "requirements.txt") imports = [] for module in module_files: imports.extend(get_imports(module)) imports = list(set(imports)) with open(requirements_file, "w", encoding="utf-8") as f: f.write("\n".join(imports) + "\n") @classmethod def from_hub( cls, repo_id: str, model_repo_id: Optional[str] = None, token: Optional[str] = None, remote: bool = False, **kwargs, ): """ Loads a tool defined on the Hub. Args: repo_id (`str`): The name of the repo on the Hub where your tool is defined. model_repo_id (`str`, *optional*): If your tool uses a model and you want to use a different model than the default, you can pass a second repo ID or an endpoint url to this argument. token (`str`, *optional*): The token to identify you on hf.co. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). remote (`bool`, *optional*, defaults to `False`): Whether to use your tool by downloading the model or (if it is available) with an inference endpoint. 
kwargs (additional keyword arguments, *optional*): Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others will be passed along to its init. """ if remote and model_repo_id is None: endpoints = get_default_endpoints() if repo_id not in endpoints: raise ValueError( f"Could not infer a default endpoint for {repo_id}, you need to pass one using the " "`model_repo_id` argument." ) model_repo_id = endpoints[repo_id] hub_kwargs_names = [ "cache_dir", "force_download", "resume_download", "proxies", "revision", "repo_type", "subfolder", "local_files_only", ] hub_kwargs = {k: v for k, v in kwargs.items() if k in hub_kwargs_names} # Try to get the tool config first. hub_kwargs["repo_type"] = get_repo_type(repo_id, **hub_kwargs) resolved_config_file = cached_file( repo_id, TOOL_CONFIG_FILE, token=token, **hub_kwargs, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) is_tool_config = resolved_config_file is not None if resolved_config_file is None: resolved_config_file = cached_file( repo_id, CONFIG_NAME, token=token, **hub_kwargs, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) if resolved_config_file is None: raise EnvironmentError( f"{repo_id} does not appear to provide a valid configuration in `tool_config.json` or `config.json`." ) with open(resolved_config_file, encoding="utf-8") as reader: config = json.load(reader) if not is_tool_config: if "custom_tool" not in config: raise EnvironmentError( f"{repo_id} does not provide a mapping to custom tools in its configuration `config.json`." ) custom_tool = config["custom_tool"] else: custom_tool = config tool_class = custom_tool["tool_class"] tool_class = get_class_from_dynamic_module(tool_class, repo_id, token=token, **hub_kwargs) if len(tool_class.name) == 0: tool_class.name = custom_tool["name"] if tool_class.name != custom_tool["name"]: logger.warning( f"{tool_class.__name__} implements a different name in its configuration and class. Using the tool " "configuration name." ) tool_class.name = custom_tool["name"] if len(tool_class.description) == 0: tool_class.description = custom_tool["description"] if tool_class.description != custom_tool["description"]: logger.warning( f"{tool_class.__name__} implements a different description in its configuration and class. Using the " "tool configuration description." ) tool_class.description = custom_tool["description"] if remote: return RemoteTool(model_repo_id, token=token, tool_class=tool_class) return tool_class(model_repo_id, token=token, **kwargs) def push_to_hub( self, repo_id: str, commit_message: str = "Upload tool", private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, create_pr: bool = False, ) -> str: """ Upload the tool to the Hub. Parameters: repo_id (`str`): The name of the repository you want to push your tool to. It should contain your organization name when pushing to a given organization. commit_message (`str`, *optional*, defaults to `"Upload tool"`): Message to commit while pushing. private (`bool`, *optional*): Whether or not the repository created should be private. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. 
If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. """ repo_url = create_repo( repo_id=repo_id, token=token, private=private, exist_ok=True, repo_type="space", space_sdk="gradio" ) repo_id = repo_url.repo_id metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space") with tempfile.TemporaryDirectory() as work_dir: # Save all files. self.save(work_dir) logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}") return upload_folder( repo_id=repo_id, commit_message=commit_message, folder_path=work_dir, token=token, create_pr=create_pr, repo_type="space", ) @staticmethod def from_gradio(gradio_tool): """ Creates a [`Tool`] from a gradio tool. """ class GradioToolWrapper(Tool): def __init__(self, _gradio_tool): super().__init__() self.name = _gradio_tool.name self.description = _gradio_tool.description GradioToolWrapper.__call__ = gradio_tool.run return GradioToolWrapper(gradio_tool) class RemoteTool(Tool): """ A [`Tool`] that will make requests to an inference endpoint. Args: endpoint_url (`str`, *optional*): The url of the endpoint to use. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). tool_class (`type`, *optional*): The corresponding `tool_class` if this is a remote version of an existing tool. Will help determine when the output should be converted to another type (like images). """ def __init__(self, endpoint_url=None, token=None, tool_class=None): self.endpoint_url = endpoint_url self.client = EndpointClient(endpoint_url, token=token) self.tool_class = tool_class def prepare_inputs(self, *args, **kwargs): """ Prepare the inputs received for the HTTP client sending data to the endpoint. Positional arguments will be matched with the signature of the `tool_class` if it was provided at instantation. Images will be encoded into bytes. You can override this method in your custom class of [`RemoteTool`]. """ inputs = kwargs.copy() if len(args) > 0: if self.tool_class is not None: # Match args with the signature if issubclass(self.tool_class, PipelineTool): call_method = self.tool_class.encode else: call_method = self.tool_class.__call__ signature = inspect.signature(call_method).parameters parameters = [ k for k, p in signature.items() if p.kind not in [inspect._ParameterKind.VAR_POSITIONAL, inspect._ParameterKind.VAR_KEYWORD] ] if parameters[0] == "self": parameters = parameters[1:] if len(args) > len(parameters): raise ValueError( f"{self.tool_class} only accepts {len(parameters)} arguments but {len(args)} were given." ) for arg, name in zip(args, parameters): inputs[name] = arg elif len(args) > 1: raise ValueError("A `RemoteTool` can only accept one positional input.") elif len(args) == 1: if is_pil_image(args[0]): return {"inputs": self.client.encode_image(args[0])} return {"inputs": args[0]} for key, value in inputs.items(): if is_pil_image(value): inputs[key] = self.client.encode_image(value) return {"inputs": inputs} def extract_outputs(self, outputs): """ You can override this method in your custom class of [`RemoteTool`] to apply some custom post-processing of the outputs of the endpoint. 
""" return outputs def __call__(self, *args, **kwargs): args, kwargs = handle_agent_inputs(*args, **kwargs) output_image = self.tool_class is not None and self.tool_class.outputs == ["image"] inputs = self.prepare_inputs(*args, **kwargs) if isinstance(inputs, dict): outputs = self.client(**inputs, output_image=output_image) else: outputs = self.client(inputs, output_image=output_image) if isinstance(outputs, list) and len(outputs) == 1 and isinstance(outputs[0], list): outputs = outputs[0] outputs = handle_agent_outputs(outputs, self.tool_class.outputs if self.tool_class is not None else None) return self.extract_outputs(outputs) class PipelineTool(Tool): """ A [`Tool`] tailored towards Transformer models. On top of the class attributes of the base class [`Tool`], you will need to specify: - **model_class** (`type`) -- The class to use to load the model in this tool. - **default_checkpoint** (`str`) -- The default checkpoint that should be used when the user doesn't specify one. - **pre_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the pre-processor - **post_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the post-processor (when different from the pre-processor). Args: model (`str` or [`PreTrainedModel`], *optional*): The name of the checkpoint to use for the model, or the instantiated model. If unset, will default to the value of the class attribute `default_checkpoint`. pre_processor (`str` or `Any`, *optional*): The name of the checkpoint to use for the pre-processor, or the instantiated pre-processor (can be a tokenizer, an image processor, a feature extractor or a processor). Will default to the value of `model` if unset. post_processor (`str` or `Any`, *optional*): The name of the checkpoint to use for the post-processor, or the instantiated pre-processor (can be a tokenizer, an image processor, a feature extractor or a processor). Will default to the `pre_processor` if unset. device (`int`, `str` or `torch.device`, *optional*): The device on which to execute the model. Will default to any accelerator available (GPU, MPS etc...), the CPU otherwise. device_map (`str` or `dict`, *optional*): If passed along, will be used to instantiate the model. model_kwargs (`dict`, *optional*): Any keyword argument to send to the model instantiation. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). hub_kwargs (additional keyword arguments, *optional*): Any additional keyword argument to send to the methods that will load the data from the Hub. 
""" pre_processor_class = AutoProcessor model_class = None post_processor_class = AutoProcessor default_checkpoint = None def __init__( self, model=None, pre_processor=None, post_processor=None, device=None, device_map=None, model_kwargs=None, token=None, **hub_kwargs, ): if not is_torch_available(): raise ImportError("Please install torch in order to use this tool.") if not is_accelerate_available(): raise ImportError("Please install accelerate in order to use this tool.") if model is None: if self.default_checkpoint is None: raise ValueError("This tool does not implement a default checkpoint, you need to pass one.") model = self.default_checkpoint if pre_processor is None: pre_processor = model self.model = model self.pre_processor = pre_processor self.post_processor = post_processor self.device = device self.device_map = device_map self.model_kwargs = {} if model_kwargs is None else model_kwargs if device_map is not None: self.model_kwargs["device_map"] = device_map self.hub_kwargs = hub_kwargs self.hub_kwargs["token"] = token super().__init__() def setup(self): """ Instantiates the `pre_processor`, `model` and `post_processor` if necessary. """ if isinstance(self.pre_processor, str): self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs) if isinstance(self.model, str): self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs) if self.post_processor is None: self.post_processor = self.pre_processor elif isinstance(self.post_processor, str): self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs) if self.device is None: if self.device_map is not None: self.device = list(self.model.hf_device_map.values())[0] else: self.device = PartialState().default_device if self.device_map is None: self.model.to(self.device) super().setup() def encode(self, raw_inputs): """ Uses the `pre_processor` to prepare the inputs for the `model`. """ return self.pre_processor(raw_inputs) def forward(self, inputs): """ Sends the inputs through the `model`. """ with torch.no_grad(): return self.model(**inputs) def decode(self, outputs): """ Uses the `post_processor` to decode the model output. """ return self.post_processor(outputs) def __call__(self, *args, **kwargs): args, kwargs = handle_agent_inputs(*args, **kwargs) if not self.is_initialized: self.setup() encoded_inputs = self.encode(*args, **kwargs) encoded_inputs = send_to_device(encoded_inputs, self.device) outputs = self.forward(encoded_inputs) outputs = send_to_device(outputs, "cpu") decoded_outputs = self.decode(outputs) return handle_agent_outputs(decoded_outputs, self.outputs) def launch_gradio_demo(tool_class: Tool): """ Launches a gradio demo for a tool. The corresponding tool class needs to properly implement the class attributes `inputs` and `outputs`. Args: tool_class (`type`): The class of the tool for which to launch the demo. 
""" try: import gradio as gr except ImportError: raise ImportError("Gradio should be installed in order to launch a gradio demo.") tool = tool_class() def fn(*args, **kwargs): return tool(*args, **kwargs) gr.Interface( fn=fn, inputs=tool_class.inputs, outputs=tool_class.outputs, title=tool_class.__name__, article=tool.description, ).launch() TASK_MAPPING = { "document-question-answering": "DocumentQuestionAnsweringTool", "image-captioning": "ImageCaptioningTool", "image-question-answering": "ImageQuestionAnsweringTool", "image-segmentation": "ImageSegmentationTool", "speech-to-text": "SpeechToTextTool", "summarization": "TextSummarizationTool", "text-classification": "TextClassificationTool", "text-question-answering": "TextQuestionAnsweringTool", "text-to-speech": "TextToSpeechTool", "translation": "TranslationTool", } def get_default_endpoints(): endpoints_file = cached_file("huggingface-tools/default-endpoints", "default_endpoints.json", repo_type="dataset") with open(endpoints_file, "r", encoding="utf-8") as f: endpoints = json.load(f) return endpoints def supports_remote(task_or_repo_id): endpoints = get_default_endpoints() return task_or_repo_id in endpoints def load_tool(task_or_repo_id, model_repo_id=None, remote=False, token=None, **kwargs): """ Main function to quickly load a tool, be it on the Hub or in the Transformers library. Args: task_or_repo_id (`str`): The task for which to load the tool or a repo ID of a tool on the Hub. Tasks implemented in Transformers are: - `"document-question-answering"` - `"image-captioning"` - `"image-question-answering"` - `"image-segmentation"` - `"speech-to-text"` - `"summarization"` - `"text-classification"` - `"text-question-answering"` - `"text-to-speech"` - `"translation"` model_repo_id (`str`, *optional*): Use this argument to use a different model than the default one for the tool you selected. remote (`bool`, *optional*, defaults to `False`): Whether to use your tool by downloading the model or (if it is available) with an inference endpoint. token (`str`, *optional*): The token to identify you on hf.co. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). kwargs (additional keyword arguments, *optional*): Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others will be passed along to its init. """ if task_or_repo_id in TASK_MAPPING: tool_class_name = TASK_MAPPING[task_or_repo_id] main_module = importlib.import_module("transformers") tools_module = main_module.tools tool_class = getattr(tools_module, tool_class_name) if remote: if model_repo_id is None: endpoints = get_default_endpoints() if task_or_repo_id not in endpoints: raise ValueError( f"Could not infer a default endpoint for {task_or_repo_id}, you need to pass one using the " "`model_repo_id` argument." ) model_repo_id = endpoints[task_or_repo_id] return RemoteTool(model_repo_id, token=token, tool_class=tool_class) else: return tool_class(model_repo_id, token=token, **kwargs) else: return Tool.from_hub(task_or_repo_id, model_repo_id=model_repo_id, token=token, remote=remote, **kwargs) def add_description(description): """ A decorator that adds a description to a function. 
""" def inner(func): func.description = description func.name = func.__name__ return func return inner ## Will move to the Hub class EndpointClient: def __init__(self, endpoint_url: str, token: Optional[str] = None): self.headers = {**build_hf_headers(token=token), "Content-Type": "application/json"} self.endpoint_url = endpoint_url @staticmethod def encode_image(image): _bytes = io.BytesIO() image.save(_bytes, format="PNG") b64 = base64.b64encode(_bytes.getvalue()) return b64.decode("utf-8") @staticmethod def decode_image(raw_image): if not is_vision_available(): raise ImportError( "This tool returned an image but Pillow is not installed. Please install it (`pip install Pillow`)." ) from PIL import Image b64 = base64.b64decode(raw_image) _bytes = io.BytesIO(b64) return Image.open(_bytes) def __call__( self, inputs: Optional[Union[str, Dict, List[str], List[List[str]]]] = None, params: Optional[Dict] = None, data: Optional[bytes] = None, output_image: bool = False, ) -> Any: # Build payload payload = {} if inputs: payload["inputs"] = inputs if params: payload["parameters"] = params # Make API call response = get_session().post(self.endpoint_url, headers=self.headers, json=payload, data=data) # By default, parse the response for the user. if output_image: return self.decode_image(response.content) else: return response.json()
transformers/src/transformers/tools/base.py/0
{ "file_path": "transformers/src/transformers/tools/base.py", "repo_id": "transformers", "token_count": 12529 }
385
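To make the `Tool` interface from `tools/base.py` above concrete, here is a minimal sketch of a custom tool. The class name, description and the string-reversal logic are invented for illustration; only the attributes (`name`, `description`, `inputs`, `outputs`) and the overridden `__call__` follow the base class contract shown above.

```python
# Minimal sketch of a custom tool; the tool itself (name, description, logic)
# is hypothetical and only illustrates the `Tool` interface defined above.
from transformers.tools import Tool


class ReverseTextTool(Tool):
    name = "text_reverser"
    description = "This is a tool that reverses the characters of the text given to it."
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, text: str) -> str:
        return text[::-1]


tool = ReverseTextTool()
print(tool("hello"))  # -> "olleh"

# If this class lives in its own module (not `__main__`), it can also be
# serialized or shared with the methods shown above, for example:
#   tool.save("reverse_text_tool")                  # writes tool_config.json, app.py, requirements.txt
#   tool.push_to_hub("my-username/text-reverser")   # hypothetical repo id
```

Loading such a tool back is then a single `Tool.from_hub(...)` or `load_tool(...)` call, which resolves `tool_config.json` from the repo and instantiates the class dynamically.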
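For model-backed tools, the `PipelineTool` class above handles lazy `setup()`, device placement and the `encode`/`forward`/`decode` round trip. The following rough sketch assumes a seq2seq summarization checkpoint; the checkpoint name and the exact pre/post-processing are illustrative placeholders, not the defaults shipped with Transformers.

```python
# Sketch of a model-backed tool built on `PipelineTool`; the checkpoint name
# and the encode/decode details are illustrative placeholders.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.tools import PipelineTool


class SimpleSummarizerTool(PipelineTool):
    name = "simple_summarizer"
    description = "This is a tool that summarizes an English text given to it."
    inputs = ["text"]
    outputs = ["text"]

    default_checkpoint = "sshleifer/distilbart-cnn-12-6"  # placeholder checkpoint
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0], skip_special_tokens=True)


summarizer = SimpleSummarizerTool()
# `setup()` runs lazily on the first call: it loads the checkpoint, picks the
# default accelerator device and reuses the tokenizer as post-processor.
# print(summarizer("A long article about ..."))
```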
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Torch utilities for the Trainer class. """ import datetime import json import math import os import sys import warnings from collections.abc import Mapping from contextlib import contextmanager from dataclasses import dataclass from logging import StreamHandler from typing import Any, Dict, Iterator, List, Optional, Union import numpy as np import torch import torch.distributed as dist from torch import nn from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler from torch.utils.data.distributed import DistributedSampler from .integrations.deepspeed import is_deepspeed_zero3_enabled from .tokenization_utils_base import BatchEncoding from .utils import is_sagemaker_mp_enabled, is_torch_tpu_available, is_training_run_on_sagemaker, logging if is_training_run_on_sagemaker(): logging.add_handler(StreamHandler(sys.stdout)) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm # this is used to suppress an undesired warning emitted by pytorch versions 1.4.2-1.7.0 try: from torch.optim.lr_scheduler import SAVE_STATE_WARNING except ImportError: SAVE_STATE_WARNING = "" logger = logging.get_logger(__name__) def get_dataloader_sampler(dataloader): if hasattr(dataloader, "batch_sampler") and dataloader.batch_sampler is not None: return get_dataloader_sampler(dataloader.batch_sampler) elif hasattr(dataloader, "sampler"): return dataloader.sampler def atleast_1d(tensor_or_array: Union[torch.Tensor, np.ndarray]): if isinstance(tensor_or_array, torch.Tensor): if hasattr(torch, "atleast_1d"): tensor_or_array = torch.atleast_1d(tensor_or_array) elif tensor_or_array.ndim < 1: tensor_or_array = tensor_or_array[None] else: tensor_or_array = np.atleast_1d(tensor_or_array) return tensor_or_array def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100): """Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary.""" tensor1 = atleast_1d(tensor1) tensor2 = atleast_1d(tensor2) if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]: return torch.cat((tensor1, tensor2), dim=0) # Let's figure out the new shape new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tensor1.shape[2:] # Now let's fill the result tensor result = tensor1.new_full(new_shape, padding_index) result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1 result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2 return result def numpy_pad_and_concatenate(array1, array2, padding_index=-100): """Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.""" array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now 
let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result def nested_concat(tensors, new_tensors, padding_index=-100): """ Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or nested list/tuples/dict of tensors. """ assert type(tensors) == type( new_tensors ), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}." if isinstance(tensors, (list, tuple)): return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors)) elif isinstance(tensors, torch.Tensor): return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index) elif isinstance(tensors, Mapping): return type(tensors)( {k: nested_concat(t, new_tensors[k], padding_index=padding_index) for k, t in tensors.items()} ) elif isinstance(tensors, np.ndarray): return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index) else: raise TypeError(f"Unsupported type for concatenation: got {type(tensors)}") def find_batch_size(tensors): """ Find the first dimension of a tensor in a nested list/tuple/dict of tensors. """ if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None def nested_numpify(tensors): "Numpify `tensors` (even if it's a nested list/tuple/dict of tensors)." if isinstance(tensors, (list, tuple)): return type(tensors)(nested_numpify(t) for t in tensors) if isinstance(tensors, Mapping): return type(tensors)({k: nested_numpify(t) for k, t in tensors.items()}) t = tensors.cpu() if t.dtype == torch.bfloat16: # As of Numpy 1.21.4, NumPy does not support bfloat16 (see # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ). # Until Numpy adds bfloat16, we must convert float32. t = t.to(torch.float32) return t.numpy() def nested_detach(tensors): "Detach `tensors` (even if it's a nested list/tuple/dict of tensors)." 
if isinstance(tensors, (list, tuple)): return type(tensors)(nested_detach(t) for t in tensors) elif isinstance(tensors, Mapping): return type(tensors)({k: nested_detach(t) for k, t in tensors.items()}) return tensors.detach() def nested_xla_mesh_reduce(tensors, name): if is_torch_tpu_available(): import torch_xla.core.xla_model as xm if isinstance(tensors, (list, tuple)): return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors)) if isinstance(tensors, Mapping): return type(tensors)( {k: nested_xla_mesh_reduce(t, f"{name}_{i}") for i, (k, t) in enumerate(tensors.items())} ) tensors = atleast_1d(tensors) return xm.mesh_reduce(name, tensors, torch.cat) else: raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`") def distributed_concat(tensor: Any, num_total_examples: Optional[int] = None) -> Any: try: if isinstance(tensor, (tuple, list)): return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor) if isinstance(tensor, Mapping): return type(tensor)({k: distributed_concat(t, num_total_examples) for k, t in tensor.items()}) tensor = atleast_1d(tensor).contiguous() output_tensors = [tensor.clone() for _ in range(dist.get_world_size())] dist.all_gather(output_tensors, tensor) concat = torch.cat(output_tensors, dim=0) # truncate the dummy elements added by SequentialDistributedSampler if num_total_examples is not None: concat = concat[:num_total_examples] return concat except AssertionError: raise AssertionError("Not currently using distributed training") def distributed_broadcast_scalars( scalars: List[Union[int, float]], num_total_examples: Optional[int] = None, device: Optional[torch.device] = torch.device("cuda"), ) -> torch.Tensor: try: tensorized_scalar = torch.tensor(scalars).to(device) output_tensors = [tensorized_scalar.clone() for _ in range(dist.get_world_size())] dist.all_gather(output_tensors, tensorized_scalar) concat = torch.cat(output_tensors, dim=0) # truncate the dummy elements added by SequentialDistributedSampler if num_total_examples is not None: concat = concat[:num_total_examples] return concat except AssertionError: raise AssertionError("Not currently using distributed training") def reissue_pt_warnings(caught_warnings): # Reissue warnings that are not the SAVE_STATE_WARNING if len(caught_warnings) > 1: for w in caught_warnings: if w.category != UserWarning or w.message != SAVE_STATE_WARNING: warnings.warn(w.message, w.category) @contextmanager def torch_distributed_zero_first(local_rank: int): """ Decorator to make all processes in distributed training wait for each local_master to do something. Args: local_rank (`int`): The rank of the local process. """ if local_rank not in [-1, 0]: dist.barrier() yield if local_rank == 0: dist.barrier() class DistributedSamplerWithLoop(DistributedSampler): """ Like a torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled samples to make each process have a round multiple of batch_size samples. Args: dataset (`torch.utils.data.Dataset`): Dataset used for sampling. batch_size (`int`): The batch size used with this sampler kwargs (`Dict[str, Any]`, *optional*): All other keyword arguments passed to `DistributedSampler`. 
""" def __init__(self, dataset, batch_size, **kwargs): super().__init__(dataset, **kwargs) self.batch_size = batch_size def __iter__(self): indices = list(super().__iter__()) remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size # DistributedSampler already added samples from the beginning to make the number of samples a round multiple # of the world size, so we skip those. start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0 indices += indices[start_remainder : start_remainder + remainder] return iter(indices) class SequentialDistributedSampler(Sampler): """ Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end. Even though we only use this sampler for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. """ def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None): warnings.warn( "SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank num_samples = len(self.dataset) # Add extra samples to make num_samples a multiple of batch_size if passed if batch_size is not None: self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size else: self.num_samples = int(math.ceil(num_samples / num_replicas)) self.total_size = self.num_samples * self.num_replicas self.batch_size = batch_size def __iter__(self): indices = list(range(len(self.dataset))) # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert ( len(indices) == self.total_size ), f"Indices length {len(indices)} and total size {self.total_size} mismatched" # subsample indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] assert ( len(indices) == self.num_samples ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched" return iter(indices) def __len__(self): return self.num_samples def get_tpu_sampler(dataset: torch.utils.data.Dataset, batch_size: int): if xm.xrt_world_size() <= 1: return RandomSampler(dataset) return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()) def nested_new_like(arrays, num_samples, padding_index=-100): """Create the same nested structure as `arrays` with a first dimension always at `num_samples`.""" if isinstance(arrays, (list, tuple)): return type(arrays)(nested_new_like(x, num_samples) for x in arrays) return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:])) def expand_like(arrays, new_seq_length, padding_index=-100): """Expand the `arrays` so that the second dimension grows to `new_seq_length`. 
Uses `padding_index` for padding.""" result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:]) result[:, : arrays.shape[1]] = arrays return result def nested_truncate(tensors, limit): "Truncate `tensors` at `limit` (even if it's a nested list/tuple/dict of tensors)." if isinstance(tensors, (list, tuple)): return type(tensors)(nested_truncate(t, limit) for t in tensors) if isinstance(tensors, Mapping): return type(tensors)({k: nested_truncate(t, limit) for k, t in tensors.items()}) return tensors[:limit] class DistributedTensorGatherer: """ A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks. If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices: `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]` to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and 2 will be responsible of making predictions for the following samples: - P0: `[0, 1, 2, 3, 4, 5]` - P1: `[6, 7, 8, 9, 10, 11]` - P2: `[12, 13, 14, 15, 0, 1]` The first batch treated on each process will be - P0: `[0, 1]` - P1: `[6, 7]` - P2: `[12, 13]` So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices: `[0, 1, 6, 7, 12, 13]` If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop: `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]` For some reason, that's not going to roll their boat. This class is there to solve that problem. Args: world_size (`int`): The number of processes used in the distributed training. num_samples (`int`): The number of samples in our dataset. make_multiple_of (`int`, *optional*): If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples). padding_index (`int`, *optional*, defaults to -100): The padding index to use if the arrays don't all have the same sequence length. """ def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100): warnings.warn( "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.world_size = world_size self.num_samples = num_samples total_size = world_size if make_multiple_of is None else world_size * make_multiple_of self.total_samples = int(np.ceil(num_samples / total_size)) * total_size self.process_length = self.total_samples // world_size self._storage = None self._offsets = None self.padding_index = padding_index def add_arrays(self, arrays): """ Add `arrays` to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we're bound to get an OOM, it happens at the beginning. 
""" if arrays is None: return if self._storage is None: self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index) self._offsets = list(range(0, self.total_samples, self.process_length)) slice_len, self._storage = self._nested_set_tensors(self._storage, arrays) for i in range(self.world_size): self._offsets[i] += slice_len def _nested_set_tensors(self, storage, arrays): if isinstance(arrays, (list, tuple)): result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)] return result[0][0], type(arrays)(r[1] for r in result) assert ( arrays.shape[0] % self.world_size == 0 ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}." slice_len = arrays.shape[0] // self.world_size for i in range(self.world_size): if len(arrays.shape) == 1: storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len] else: # Expand the array on the fly if needed. if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]: storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index) storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[ i * slice_len : (i + 1) * slice_len ] return slice_len, storage def finalize(self): """ Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length). """ if self._storage is None: return if self._offsets[0] != self.process_length: logger.warning("Not all data has been set. Are you sure you passed all values?") return nested_truncate(self._storage, self.num_samples) @dataclass class LabelSmoother: """ Adds label-smoothing on a pre-computed output from a Transformers model. Args: epsilon (`float`, *optional*, defaults to 0.1): The label smoothing factor. ignore_index (`int`, *optional*, defaults to -100): The index in the labels to ignore when computing the loss. """ epsilon: float = 0.1 ignore_index: int = -100 def __call__(self, model_output, labels, shift_labels=False): logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0] if shift_labels: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() log_probs = -nn.functional.log_softmax(logits, dim=-1) if labels.dim() == log_probs.dim() - 1: labels = labels.unsqueeze(-1) padding_mask = labels.eq(self.ignore_index) # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask # will ignore them in any case. labels = torch.clamp(labels, min=0) nll_loss = log_probs.gather(dim=-1, index=labels) # works for fp16 input tensor too, by internally upcasting it to fp32 smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32) nll_loss.masked_fill_(padding_mask, 0.0) smoothed_loss.masked_fill_(padding_mask, 0.0) # Take the mean over the label dimensions, then divide by the number of active elements (i.e. not-padded): num_active_elements = padding_mask.numel() - padding_mask.long().sum() nll_loss = nll_loss.sum() / num_active_elements smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1]) return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss def get_length_grouped_indices(lengths, batch_size, mega_batch_mult=None, generator=None): """ Return a list of indices so that each slice of `batch_size` consecutive indices correspond to elements of similar lengths. 
To do this, the indices are: - randomly permuted - grouped in mega-batches of size `mega_batch_mult * batch_size` - sorted by length in each mega-batch The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of maximum length placed first, so that an OOM happens sooner rather than later. """ # Default for mega_batch_mult: 50 or the number to get 4 megabatches, whichever is smaller. if mega_batch_mult is None: mega_batch_mult = min(len(lengths) // (batch_size * 4), 50) # Just in case, for tiny datasets if mega_batch_mult == 0: mega_batch_mult = 1 # We need to use torch for the random part as a distributed sampler will set the random seed for torch. indices = torch.randperm(len(lengths), generator=generator) megabatch_size = mega_batch_mult * batch_size megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)] megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] # The rest is to get the biggest batch first. # Since each megabatch is sorted by descending length, the longest element is the first megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches] max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item() # Switch to put the longest element in first position megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][0], megabatches[0][0] return [i for megabatch in megabatches for i in megabatch] class LengthGroupedSampler(Sampler): r""" Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. """ def __init__( self, batch_size: int, dataset: Optional[Dataset] = None, lengths: Optional[List[int]] = None, model_input_name: Optional[str] = None, generator=None, ): if dataset is None and lengths is None: raise ValueError("One of dataset and lengths must be provided.") self.batch_size = batch_size if lengths is None: model_input_name = model_input_name if model_input_name is not None else "input_ids" if ( not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding)) or model_input_name not in dataset[0] ): raise ValueError( "Can only automatically infer lengths for datasets whose items are dictionaries with an " f"'{model_input_name}' key." ) lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info( "If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to List[int]..." ) lengths = lengths.tolist() self.lengths = lengths self.generator = generator def __len__(self): return len(self.lengths) def __iter__(self): indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator) return iter(indices) class DistributedLengthGroupedSampler(DistributedSampler): r""" Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. """ # Copied and adapted from PyTorch DistributedSampler. 
def __init__( self, batch_size: int, dataset: Optional[Dataset] = None, num_replicas: Optional[int] = None, rank: Optional[int] = None, seed: int = 0, drop_last: bool = False, lengths: Optional[List[int]] = None, model_input_name: Optional[str] = None, ): if dataset is None and lengths is None: raise ValueError("One of dataset and lengths must be provided.") if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.batch_size = batch_size self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.drop_last = drop_last if lengths is None: model_input_name = model_input_name if model_input_name is not None else "input_ids" if ( not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding)) or model_input_name not in dataset[0] ): raise ValueError( "Can only automatically infer lengths for datasets whose items are dictionaries with an " f"'{model_input_name}' key." ) lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info( "If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to" " List[int]..." ) lengths = lengths.tolist() self.lengths = lengths # If the dataset length is evenly divisible by # of replicas, then there # is no need to drop any data, since the dataset will be split equally. if self.drop_last and len(self.lengths) % self.num_replicas != 0: # Split to nearest available length that is evenly divisible. # This is to ensure each rank receives the same amount of data when # using this Sampler. self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas) else: self.num_samples = math.ceil(len(self.lengths) / self.num_replicas) self.total_size = self.num_samples * self.num_replicas self.seed = seed def __iter__(self) -> Iterator: # Deterministically shuffle based on epoch and seed g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g) if not self.drop_last: # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] else: # remove tail of data to make it evenly divisible. indices = indices[: self.total_size] assert len(indices) == self.total_size # subsample indices = indices[self.rank : self.total_size : self.num_replicas] assert len(indices) == self.num_samples return iter(indices) class ShardSampler(Sampler): """ Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1. The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1. 
""" def __init__( self, dataset: Dataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, ): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.total_batch_size = total_batch_size = batch_size * num_processes num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size) self.total_num_samples = num_batches * total_batch_size def __iter__(self): indices = list(range(len(self.dataset))) # Add extra samples to make it evenly divisible. While loop is there in the edge case we have a tiny dataset # and it needs to be done several times. while len(indices) < self.total_num_samples: indices += indices[: (self.total_num_samples - len(indices))] result = [] for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size): result += indices[batch_start : batch_start + self.batch_size] return iter(result) def __len__(self): # Each shard only sees a fraction of total_num_samples. return self.total_num_samples // self.num_processes class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of 2: - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]` - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]` <Tip warning={true}> If your IterableDataset implements some randomization that needs to be applied the same way on all processes (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with this. </Tip> Args: dataset (`torch.utils.data.IterableDataset`): The batch sampler to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard. drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. seed (`int`, *optional*, defaults to 0): A random seed that will be used for the random number generation in [`~trainer_pt_utils.IterableDatasetShard.set_epoch`]. 
""" def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, seed: int = 0, ): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.seed = seed self.epoch = 0 self.num_examples = 0 def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __iter__(self): self.num_examples = 0 if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.seed + self.epoch) real_batch_size = self.batch_size * self.num_processes process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size) first_batch = None current_batch = [] for element in self.dataset: self.num_examples += 1 current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] def __len__(self): # Will raise an error if the underlying dataset is not sized. if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size # In order to keep `trainer.py` compact and easy to understand, place any secondary PT Trainer # helper methods here def _get_learning_rate(self): if self.is_deepspeed_enabled: # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may # not run for the first few dozen steps while loss scale is too large, and thus during # that time `get_last_lr` will fail if called during that warm up stage, so work around it: try: last_lr = self.lr_scheduler.get_last_lr()[0] except AssertionError as e: if "need to call step" in str(e): logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0") last_lr = 0 else: raise else: if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): last_lr = self.optimizer.param_groups[0]["lr"] else: last_lr = self.lr_scheduler.get_last_lr()[0] if torch.is_tensor(last_lr): last_lr = last_lr.item() return last_lr def _secs2timedelta(secs): """ convert seconds to hh:mm:ss.msec, msecs rounded to 2 decimals """ msec = int(abs(secs - int(secs)) * 100) return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}" def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]: """ Reformat Trainer metrics values to a human-readable format Args: metrics (`Dict[str, float]`): The metrics returned from train/evaluate/predict Returns: metrics (`Dict[str, float]`): The reformatted metrics """ metrics_copy = metrics.copy() for k, v in metrics_copy.items(): if "_mem_" in k: metrics_copy[k] = f"{ v >> 20 }MB" elif "_runtime" in k: metrics_copy[k] = _secs2timedelta(v) elif k == "total_flos": metrics_copy[k] = f"{ int(v) >> 30 }GF" elif isinstance(metrics_copy[k], float): 
metrics_copy[k] = round(v, 4) return metrics_copy def log_metrics(self, split, metrics): """ Log metrics in a specially formatted way Under distributed environment this is done only for a process with rank 0. Args: split (`str`): Mode/split name: one of `train`, `eval`, `test` metrics (`Dict[str, float]`): The metrics returned from train/evaluate/predictmetrics: metrics dict Notes on memory reports: In order to get memory usage report you need to install `psutil`. You can do that with `pip install psutil`. Now when this method is run, you will see a report that will include: : ``` init_mem_cpu_alloc_delta = 1301MB init_mem_cpu_peaked_delta = 154MB init_mem_gpu_alloc_delta = 230MB init_mem_gpu_peaked_delta = 0MB train_mem_cpu_alloc_delta = 1345MB train_mem_cpu_peaked_delta = 0MB train_mem_gpu_alloc_delta = 693MB train_mem_gpu_peaked_delta = 7MB ``` **Understanding the reports:** - the first segment, e.g., `train__`, tells you which stage the metrics are for. Reports starting with `init_` will be added to the first stage that gets run. So that if only evaluation is run, the memory usage for the `__init__` will be reported along with the `eval_` metrics. - the third segment, is either `cpu` or `gpu`, tells you whether it's the general RAM or the gpu0 memory metric. - `*_alloc_delta` - is the difference in the used/allocated memory counter between the end and the start of the stage - it can be negative if a function released more memory than it allocated. - `*_peaked_delta` - is any extra memory that was consumed and then freed - relative to the current allocated memory counter - it is never negative. When you look at the metrics of any stage you add up `alloc_delta` + `peaked_delta` and you know how much memory was needed to complete that stage. The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUS. Perhaps in the future these reports will evolve to measure those too. The CPU RAM metric measures RSS (Resident Set Size) includes both the memory which is unique to the process and the memory shared with other processes. It is important to note that it does not include swapped out memory, so the reports could be imprecise. The CPU peak memory is measured using a sampling thread. Due to python's GIL it may miss some of the peak memory if that thread didn't get a chance to run when the highest memory was used. Therefore this report can be less than reality. Using `tracemalloc` would have reported the exact peak memory, but it doesn't report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won't be reported. And therefore it was dropped in favor of the memory sampling approach, which reads the current process memory usage. The GPU allocated and peak memory reporting is done with `torch.cuda.memory_allocated()` and `torch.cuda.max_memory_allocated()`. This metric reports only "deltas" for pytorch-specific allocations, as `torch.cuda` memory management system doesn't track any memory allocated outside of pytorch. For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory. 
Note that this tracker doesn't account for memory allocations outside of [`Trainer`]'s `__init__`, `train`, `evaluate` and `predict` calls. Because `evaluation` calls may happen during `train`, we can't handle nested invocations because `torch.cuda.max_memory_allocated` is a single counter, so if it gets reset by a nested eval call, `train`'s tracker will report incorrect info. If this [pytorch issue](https://github.com/pytorch/pytorch/issues/16266) gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer level of `train`, `evaluate` and `predict` methods. Which means that if `eval` is called during `train`, it's the latter that will account for its memory usage and that of the former. This also means that if any other tool that is used along the [`Trainer`] calls `torch.cuda.reset_peak_memory_stats`, the gpu peak memory stats could be invalid. And the [`Trainer`] will disrupt the normal behavior of any such tools that rely on calling `torch.cuda.reset_peak_memory_stats` themselves. For best performance you may want to consider turning the memory profiling off for production runs. """ if not self.is_world_process_zero(): return print(f"***** {split} metrics *****") metrics_formatted = self.metrics_format(metrics) k_width = max(len(str(x)) for x in metrics_formatted.keys()) v_width = max(len(str(x)) for x in metrics_formatted.values()) for key in sorted(metrics_formatted.keys()): print(f" {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}") def save_metrics(self, split, metrics, combined=True): """ Save metrics into a json file for that split, e.g. `train_results.json`. Under distributed environment this is done only for a process with rank 0. Args: split (`str`): Mode/split name: one of `train`, `eval`, `test`, `all` metrics (`Dict[str, float]`): The metrics returned from train/evaluate/predict combined (`bool`, *optional*, defaults to `True`): Creates combined metrics by updating `all_results.json` with metrics of this call To understand the metrics please read the docstring of [`~Trainer.log_metrics`]. The only difference is that raw unformatted numbers are saved in the current method. """ if not self.is_world_process_zero(): return path = os.path.join(self.args.output_dir, f"{split}_results.json") with open(path, "w") as f: json.dump(metrics, f, indent=4, sort_keys=True) if combined: path = os.path.join(self.args.output_dir, "all_results.json") if os.path.exists(path): with open(path, "r") as f: all_metrics = json.load(f) else: all_metrics = {} all_metrics.update(metrics) with open(path, "w") as f: json.dump(all_metrics, f, indent=4, sort_keys=True) def save_state(self): """ Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model Under distributed environment this is done only for a process with rank 0. """ if not self.is_world_process_zero(): return path = os.path.join(self.args.output_dir, "trainer_state.json") self.state.save_to_json(path) def get_model_param_count(model, trainable_only=False): """ Calculate model's total param count. If trainable_only is True then count only those requiring grads """ if is_deepspeed_zero3_enabled(): def numel(p): return p.ds_numel if hasattr(p, "ds_numel") else p.numel() else: def numel(p): return p.numel() return sum(numel(p) for p in model.parameters() if not trainable_only or p.requires_grad) def get_parameter_names(model, forbidden_layer_types): """ Returns the names of the model parameters that are not inside a forbidden layer. 
""" result = [] for name, child in model.named_children(): result += [ f"{name}.{n}" for n in get_parameter_names(child, forbidden_layer_types) if not isinstance(child, tuple(forbidden_layer_types)) ] # Add model specific parameters (defined with nn.Parameter) since they are not in any child. result += list(model._parameters.keys()) return result def get_module_class_from_name(module, name): """ Gets a class from a module by its name. Args: module (`torch.nn.Module`): The module to get the class from. name (`str`): The name of the class. """ modules_children = list(module.children()) if module.__class__.__name__ == name: return module.__class__ elif len(modules_children) == 0: return else: for child_module in modules_children: module_class = get_module_class_from_name(child_module, name) if module_class is not None: return module_class def remove_dummy_checkpoint(is_main_process, output_dir, filenames): if is_main_process: for filename in filenames: file = os.path.join(output_dir, filename) if os.path.isfile(file): os.remove(file) if is_sagemaker_mp_enabled(): import smdistributed.modelparallel.torch as smp @smp.step() def smp_forward_backward(model, inputs, gradient_accumulation_steps=1): outputs = model(**inputs) loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] loss /= gradient_accumulation_steps model.backward(loss) return loss @smp.step() def smp_forward_only(model, inputs): return model(**inputs) def smp_gather(tensor): if isinstance(tensor, (list, tuple)): return type(tensor)(smp_gather(t) for t in tensor) elif isinstance(tensor, dict): return type(tensor)({k: smp_gather(v) for k, v in tensor.items()}) elif not isinstance(tensor, torch.Tensor): raise TypeError( f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors." ) all_tensors = smp.allgather(tensor, smp.CommGroup.DP_GROUP) all_tensors = [atleast_1d(t) for t in all_tensors] return torch.cat([t.cpu() for t in all_tensors], dim=0) def smp_nested_concat(tensor): if isinstance(tensor, (list, tuple)): return type(tensor)(smp_nested_concat(t) for t in tensor) elif isinstance(tensor, dict): return type(tensor)({k: smp_nested_concat(v) for k, v in tensor.items()}) # It doesn't seem possible to check here if `tensor` is a StepOutput because StepOutput lives in `smp.step` # which is also the name of the decorator so Python is confused. return tensor.concat().detach().cpu()
transformers/src/transformers/trainer_pt_utils.py/0
{ "file_path": "transformers/src/transformers/trainer_pt_utils.py", "repo_id": "transformers", "token_count": 19364 }
386
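The sharding behaviour documented in `IterableDatasetShard` above can be checked directly with a toy iterable dataset. The dataset class below is made up for the demonstration, and the import path assumes the file ships as `transformers.trainer_pt_utils`.

```python
# Reproduces the two-process example from the `IterableDatasetShard` docstring
# above, using a throwaway iterable dataset.
from torch.utils.data import IterableDataset

from transformers.trainer_pt_utils import IterableDatasetShard


class RangeIterableDataset(IterableDataset):
    def __init__(self, n):
        self.n = n

    def __iter__(self):
        return iter(range(self.n))


dataset = RangeIterableDataset(12)
shard0 = IterableDatasetShard(dataset, batch_size=2, num_processes=2, process_index=0)
shard1 = IterableDatasetShard(dataset, batch_size=2, num_processes=2, process_index=1)

print(list(shard0))  # [0, 1, 4, 5, 8, 9]
print(list(shard1))  # [2, 3, 6, 7, 10, 11]
```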
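Similarly, the `LabelSmoother` above can be exercised on toy logits to see how the usual negative log-likelihood is mixed with the uniform smoothing term. All values below are invented.

```python
# Toy values for the `LabelSmoother` defined above; positions labelled -100
# are masked out before averaging.
import torch

from transformers.trainer_pt_utils import LabelSmoother

smoother = LabelSmoother(epsilon=0.1)

logits = torch.randn(1, 3, 5)            # batch 1, sequence 3, vocab 5
labels = torch.tensor([[2, 4, -100]])    # last position is ignored

loss = smoother({"logits": logits}, labels)
print(loss)  # scalar: (1 - epsilon) * NLL + epsilon * mean(-log p) over the vocab
```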
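Finally, the length-grouping logic used by `LengthGroupedSampler` is easiest to see on a handful of toy lengths; the values and the seed below are arbitrary.

```python
# Toy illustration of `get_length_grouped_indices` from the file above; a
# seeded generator keeps the permutation reproducible.
import torch

from transformers.trainer_pt_utils import get_length_grouped_indices

lengths = list(range(1, 33))  # 32 toy feature lengths
generator = torch.Generator().manual_seed(0)

indices = get_length_grouped_indices(lengths, batch_size=4, generator=generator)
print(indices)
# Indices come out in megabatches of `mega_batch_mult * batch_size` elements,
# each sorted by decreasing length, and the first index is always the longest
# element overall (here index 31, length 32) so an OOM would surface early.
```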
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class PyTorchBenchmark(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PyTorchBenchmarkArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Cache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DynamicCache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SinkCache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GlueDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GlueDataTrainingArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineTextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineWithRefDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineWithSOPTextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SquadDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SquadDataTrainingArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TextDatasetForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeamScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeamSearchScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConstrainedBeamSearchScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Constraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConstraintListState(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DisjunctiveConstraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EpsilonLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EtaLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ExponentialDecayLengthPenalty(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GenerationMixin(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HammingDiversityLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InfNanRemoveLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitNormalization(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsProcessorList(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaxLengthCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaxTimeCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NoBadWordsLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhrasalConstraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PrefixConstrainedLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SequenceBiasLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StoppingCriteria(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StoppingCriteriaList(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TemperatureLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TopKLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TopPLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TypicalLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UnbatchedClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperTimeStampLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def top_k_top_p_filtering(*args, **kwargs): requires_backends(top_k_top_p_filtering, ["torch"]) class PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class AlbertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_albert(*args, **kwargs): requires_backends(load_tf_weights_in_albert, ["torch"]) ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None class AlignModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class AltCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class ASTForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ASTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ASTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = None MODEL_FOR_AUDIO_XVECTOR_MAPPING = None MODEL_FOR_BACKBONE_MAPPING = None MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None MODEL_FOR_CAUSAL_LM_MAPPING = None MODEL_FOR_CTC_MAPPING = None MODEL_FOR_DEPTH_ESTIMATION_MAPPING = None MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = None MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = None MODEL_FOR_MASK_GENERATION_MAPPING = None MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None MODEL_FOR_MASKED_LM_MAPPING = None MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None MODEL_FOR_OBJECT_DETECTION_MAPPING = None MODEL_FOR_PRETRAINING_MAPPING = None MODEL_FOR_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_TEXT_ENCODING_MAPPING = None MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = None MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = None MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None MODEL_FOR_VISION_2_SEQ_MAPPING = None MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None MODEL_MAPPING = None MODEL_WITH_LM_HEAD_MAPPING = None class AutoBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioXVector(metaclass=DummyObject): _backends 
= ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageToImage(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTableQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextEncoding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextToSpectrogram(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class AutoModelForTextToWaveform(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVision2Seq(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVisualQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForZeroShotImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForZeroShotObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelWithLMHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class AutoformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BARK_PRETRAINED_MODEL_ARCHIVE_LIST = None class BarkCausalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkCoarseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkFineModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkSemanticModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BART_PRETRAINED_MODEL_ARCHIVE_LIST = None class BartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class BartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartPretrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PretrainedBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BeitBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_bert(*args, **kwargs): requires_backends(load_tf_weights_in_bert, ["torch"]) class BertGenerationDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertGenerationEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertGenerationPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
def load_tf_weights_in_bert_generation(*args, **kwargs): requires_backends(load_tf_weights_in_bert_generation, ["torch"]) BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None class BigBirdForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_big_bird(*args, **kwargs): requires_backends(load_tf_weights_in_big_bird, ["torch"]) BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None class BigBirdPegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BioGptForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
BitBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlenderbotForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlenderbotSmallForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipForImageTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Blip2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2QFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
Blip2VisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None class BloomForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None class BridgeTowerForContrastiveLearning(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BROS_PRETRAINED_MODEL_ARCHIVE_LIST = None class BrosForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosSpadeEEForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosSpadeELForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CamembertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class CamembertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None class CanineForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CaninePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_canine(*args, **kwargs): requires_backends(load_tf_weights_in_canine, ["torch"]) CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ChineseCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ClapAudioModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapAudioModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapFeatureExtractor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapTextModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class CLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
CLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPTextModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPVisionModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None class CLIPSegForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ClvpDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpModelForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None class CodeGenForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CodeGenModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CodeGenPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConditionalDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrForSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST 
= None class ConvBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_convbert(*args, **kwargs): requires_backends(load_tf_weights_in_convbert, ["torch"]) CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvNextBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvNextV2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CpmAntForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CpmAntModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CpmAntPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None class CTRLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLPreTrainedModel(metaclass=DummyObject): 
_backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CvtModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CvtPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
DebertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class DebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class DecisionTransformerGPT2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeformableDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeformableDetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeformableDetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeiTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
DeiTForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MCTCTForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MCTCTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MCTCTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MMBTForClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MMBTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ModalEmbeddings(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RetriBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RetriBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TrajectoryTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class AdaptiveEmbedding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_transfo_xl(*args, **kwargs): requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
VanForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VanModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VanPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST = None class DepthAnythingForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DepthAnythingPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DETA_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetaForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrForSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DinatBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Dinov2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DistilBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) class DistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class DonutSwinModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DonutSwinPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None class DPRContextEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedContextEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRQuestionEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DPTForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class EfficientFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
EfficientNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class ElectraForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_electra(*args, **kwargs): requires_backends(load_tf_weights_in_electra, ["torch"]) ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = None class EncodecModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncodecPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None class ErnieForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForTokenClassification(metaclass=DummyObject): _backends = ["torch"] 
def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErniePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = None class ErnieMForInformationExtraction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None class EsmFoldPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForProteinFolding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = None class FalconForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class FastSpeech2ConformerHifiGan(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FastSpeech2ConformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FastSpeech2ConformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FastSpeech2ConformerWithHifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertWithLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlavaForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaImageCodebook(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaImageModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaMultimodalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class FNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class FocalNetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FSMTForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FSMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PretrainedFSMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None class FunnelBaseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_funnel(*args, **kwargs): requires_backends(load_tf_weights_in_funnel, ["torch"]) class FuyuForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class FuyuPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class GitForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None class GLPNForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GLPNModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GLPNPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2LMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_gpt2(*args, **kwargs): requires_backends(load_tf_weights_in_gpt2, ["torch"]) GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTBigCodeForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, 
["torch"]) class GPTNeoForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_gpt_neo(*args, **kwargs): requires_backends(load_tf_weights_in_gpt_neo, ["torch"]) GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoXForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapaneseLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTJForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTSanJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class GPTSanJapanesePreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None


class GraphormerForGraphClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class GraphormerModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class GraphormerPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class GroupViTModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class GroupViTPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class GroupViTTextModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class GroupViTVisionModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class HubertForCTC(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class HubertForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class HubertModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class HubertPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class IBertForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IBertForMultipleChoice(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IBertForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IBertForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IBertForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IBertModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IBertPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = None


class IdeficsForVisionText2Text(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IdeficsModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IdeficsPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class IdeficsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"]) IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ImageGPTForCausalImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_imagegpt(*args, **kwargs): requires_backends(load_tf_weights_in_imagegpt, ["torch"]) INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class InformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class InstructBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipQFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None class JukeboxModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxPrior(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxVQVAE(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Kosmos2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Kosmos2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Kosmos2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
LayoutLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LED_PRETRAINED_MODEL_ARCHIVE_LIST = None class LEDForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class LevitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
LiltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class LlavaForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlavaProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class LongformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerSelfAttention(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None class LongT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
LongT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None class LukeForEntityClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForEntityPairClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForEntitySpanClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertVisualFeatureEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None class M2M100ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class M2M100Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class M2M100PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, 
["torch"]) MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class MarkupLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class Mask2FormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Mask2FormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Mask2FormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class MaskFormerForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerSwinBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MegaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class MegaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MegatronBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = None class MgpstrForSceneTextRecognition(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MgpstrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MgpstrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class MixtralPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilebert(*args, **kwargs): requires_backends(load_tf_weights_in_mobilebert, ["torch"]) MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileNetV1ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV1Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV1PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilenet_v1(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"]) MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileNetV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2ForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilenet_v2(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"]) MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class MobileViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileViTV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2ForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class MPNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MptForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class MraForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None class MusicgenForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class MvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class NatBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatForImageClassification(metaclass=DummyObject): 
_backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None class NezhaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = None class NllbMoeForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeSparseMLP(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeTop2Router(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class NystromformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerModel(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class OneFormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OneFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OneFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_openai_gpt(*args, **kwargs): requires_backends(load_tf_weights_in_openai_gpt, ["torch"]) OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OPTForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Owlv2ForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2TextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2VisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OwlViTForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = None class PatchTSMixerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerForPretraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerForRegression(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerForTimeSeriesClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None class PatchTSTForClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTForPretraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTForRegression(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None class PegasusXForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusXModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusXPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
PerceiverForImageClassificationFourier(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForImageClassificationLearned(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForMultimodalAutoencoding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForOpticalFlow(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None class PhiForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Pix2StructForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pix2StructPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pix2StructTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pix2StructVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None class PLBartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
PLBartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PLBartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PLBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PLBartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class PoolFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PoolFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PoolFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = None class Pop2PianoForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pop2PianoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class ProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class PvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PvtModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PvtPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class QDQBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForSequenceClassification(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_qdqbert(*args, **kwargs): requires_backends(load_tf_weights_in_qdqbert, ["torch"]) class Qwen2ForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Qwen2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Qwen2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Qwen2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagSequenceForGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagTokenForGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None class RealmEmbedder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmForOpenQA(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmKnowledgeAugEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmRetriever(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_realm(*args, **kwargs): requires_backends(load_tf_weights_in_realm, ["torch"]) REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class ReformerAttention(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
ReformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerModelWithLMHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class RegNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RegNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RegNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RemBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_rembert(*args, **kwargs): requires_backends(load_tf_weights_in_rembert, ["torch"]) RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class ResNetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ResNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ResNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ResNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class RobertaForCausalLM(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None class RobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RoCBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
RoCBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_roc_bert(*args, **kwargs): requires_backends(load_tf_weights_in_roc_bert, ["torch"]) ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class RoFormerForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_roformer(*args, **kwargs): requires_backends(load_tf_weights_in_roformer, ["torch"]) RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = None class RwkvForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RwkvModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RwkvPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None class SamModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SamPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = None class SeamlessM4TCodeHifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForTextToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class SeamlessM4THifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TTextToUnitForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TTextToUnitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class SeamlessM4Tv2ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForTextToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SegformerDecodeHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None class SEWForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None class SEWDForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class SiglipModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Speech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2TextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2TextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2Text2ForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2Text2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None class SpeechT5ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5ForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5ForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5HifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SplinterForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class SqueezeBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertModule(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwiftFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwiftFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwinBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None class Swin2SRForImageSuperResolution(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swin2SRModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swin2SRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Swinv2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2ForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
Swinv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwitchTransformersEncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersSparseMLP(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersTop1Router(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) T5_PRETRAINED_MODEL_ARCHIVE_LIST = None class T5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_t5(*args, **kwargs): requires_backends(load_tf_weights_in_t5, ["torch"]) TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TableTransformerForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TableTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TableTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None class TapasForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class TapasPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_tapas(*args, **kwargs): requires_backends(load_tf_weights_in_tapas, ["torch"]) TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TimeSeriesTransformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimeSeriesTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TimesformerForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimesformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimesformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimmBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None class TrOCRForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TrOCRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TvltForAudioVisualClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TvpForVideoGrounding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class UMT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None class UniSpeechForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class UnivNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UperNetForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UperNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None class VideoMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViltForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
ViltForImagesAndTextClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class VipLlavaForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VipLlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class VisualBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForVisualReasoning(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTHybridForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTHybridModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTHybridPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAELayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAEModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTMSNForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMSNModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMSNPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitDetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitDetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitDetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitMatteForImageMatting(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitMattePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITS_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitsPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class VivitForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VivitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VivitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
Wav2Vec2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2BertForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class WavLMForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForXVector(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None class WhisperForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class XCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class XGLMForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XGLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XGLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMWithLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) class XLMProphetNetEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMRobertaXLForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
XLNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_xlnet(*args, **kwargs): requires_backends(load_tf_weights_in_xlnet, ["torch"]) XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None class XmodForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None class YolosForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YolosModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YolosPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None class YosoForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class YosoLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Adafactor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AdamW(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def get_constant_schedule(*args, **kwargs): requires_backends(get_constant_schedule, ["torch"]) def get_constant_schedule_with_warmup(*args, **kwargs): requires_backends(get_constant_schedule_with_warmup, ["torch"]) def get_cosine_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_schedule_with_warmup, ["torch"]) def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) def get_inverse_sqrt_schedule(*args, **kwargs): requires_backends(get_inverse_sqrt_schedule, ["torch"]) def get_linear_schedule_with_warmup(*args, **kwargs): requires_backends(get_linear_schedule_with_warmup, ["torch"]) def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) def get_scheduler(*args, **kwargs): requires_backends(get_scheduler, ["torch"]) class Conv1D(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def apply_chunking_to_forward(*args, **kwargs): requires_backends(apply_chunking_to_forward, ["torch"]) def prune_layer(*args, **kwargs): requires_backends(prune_layer, ["torch"]) class Trainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def torch_distributed_zero_first(*args, **kwargs): requires_backends(torch_distributed_zero_first, ["torch"]) class Seq2SeqTrainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"])
transformers/src/transformers/utils/dummy_pt_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_pt_objects.py", "repo_id": "transformers", "token_count": 91504 }
387
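The record above is the auto-generated `dummy_pt_objects.py`: every torch-only symbol is replaced by a placeholder whose metaclass is `DummyObject` and whose constructor calls `requires_backends(self, ["torch"])`, so `import transformers` still succeeds without PyTorch and a clear error only surfaces when one of these symbols is actually used. A minimal, self-contained sketch of that pattern is shown below; the simplified `requires_backends` and `DummyObject` here are illustrative stand-ins, not the real helpers from `transformers.utils.import_utils`.

# Minimal sketch of the dummy-object pattern (illustrative stand-ins, not the transformers implementation).
import importlib.util


def requires_backends(obj, backends):
    # Raise a readable ImportError if any required backend is not installed.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [backend for backend in backends if importlib.util.find_spec(backend) is None]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s), which are not installed.")


class DummyObject(type):
    # Metaclass: accessing any public attribute of the class triggers the backend check.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class WhisperModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


# Without torch installed, both `WhisperModel()` and `WhisperModel.from_pretrained`
# raise ImportError with an actionable message instead of failing at import time.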
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import importlib.metadata
import os
from typing import Dict, Optional, Union

from packaging import version

from .hub import cached_file
from .import_utils import is_peft_available


ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"


def find_adapter_config_file(
    model_id: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    subfolder: str = "",
    _commit_hash: Optional[str] = None,
) -> Optional[str]:
    r"""
    Checks whether the model stored on the Hub or locally is an adapter model, and returns the path of the adapter
    config file if it is, `None` otherwise.

    Args:
        model_id (`str`):
            The identifier of the model to look for, can be either a local path or an id to the repository on the
            Hub.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the
            standard cache should not be used.
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force to (re-)download the configuration files and override the cached versions if
            they exist.
        resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Attempts to resume the download if such a file
            exists.
        proxies (`Dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
        token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `huggingface-cli login` (stored in `~/.huggingface`).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.

            <Tip>

            To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

            </Tip>

        local_files_only (`bool`, *optional*, defaults to `False`):
            If `True`, will only try to load the tokenizer configuration from local files.
        subfolder (`str`, *optional*, defaults to `""`):
            In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
            specify the folder name here.
    """
    adapter_cached_filename = None
    if model_id is None:
        return None
    elif os.path.isdir(model_id):
        list_remote_files = os.listdir(model_id)
        if ADAPTER_CONFIG_NAME in list_remote_files:
            adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME)
    else:
        adapter_cached_filename = cached_file(
            model_id,
            ADAPTER_CONFIG_NAME,
            cache_dir=cache_dir,
            force_download=force_download,
            resume_download=resume_download,
            proxies=proxies,
            token=token,
            revision=revision,
            local_files_only=local_files_only,
            subfolder=subfolder,
            _commit_hash=_commit_hash,
            _raise_exceptions_for_gated_repo=False,
            _raise_exceptions_for_missing_entries=False,
            _raise_exceptions_for_connection_errors=False,
        )

    return adapter_cached_filename


def check_peft_version(min_version: str) -> None:
    r"""
    Checks if the version of PEFT is compatible.

    Args:
        min_version (`str`):
            The version of PEFT to check against.
    """
    if not is_peft_available():
        raise ValueError("PEFT is not installed. Please install it with `pip install peft`")

    is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version)

    if not is_peft_version_compatible:
        raise ValueError(
            f"The version of PEFT you are using is not compatible, please use a version that is greater"
            f" than {min_version}"
        )
transformers/src/transformers/utils/peft_utils.py/0
{ "file_path": "transformers/src/transformers/utils/peft_utils.py", "repo_id": "transformers", "token_count": 1972 }
388
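Because `find_adapter_config_file` returns either the resolved path of `adapter_config.json` or `None`, a caller can use it to decide whether a checkpoint should be treated as a PEFT adapter, and `check_peft_version` to fail early when the installed PEFT is too old. A short usage sketch follows; the repo id and the `"0.5.0"` minimum version are hypothetical placeholders, not values taken from this file.

# Usage sketch for the helpers above; the repo id and min_version are hypothetical placeholders.
from transformers.utils.peft_utils import check_peft_version, find_adapter_config_file

model_id = "some-user/some-lora-adapter"  # hypothetical Hub repo or local directory

adapter_config_path = find_adapter_config_file(model_id)
if adapter_config_path is not None:
    # The checkpoint ships an adapter_config.json, so treat it as a PEFT adapter
    # and make sure a compatible PEFT version is installed before loading it.
    check_peft_version(min_version="0.5.0")
    print(f"Found adapter config at {adapter_config_path}")
else:
    print("No adapter config found; load the checkpoint as a regular model.")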
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" %} from typing import Callable, Optional, Tuple import numpy as np import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, unfreeze, freeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.traverse_util import flatten_dict, unflatten_dict from flax.linen.attention import dot_product_attention_weights from jax import lax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_flax_outputs import ( FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" _TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer" {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`~{{cookiecutter.uppercase_modelname}}Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. 
Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.uppercase_modelname}}ConfiTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`numpy.ndarray` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" remat = nn_partitioning.remat # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`\ : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. 
This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Attention(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = Flax{{cookiecutter.camelcase_modelname}}SelfAttention(self.config, dtype=self.dtype) self.output = Flax{{cookiecutter.camelcase_modelname}}SelfOutput(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return 
outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Intermediate(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Output(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + attention_output) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Layer(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = Flax{{cookiecutter.camelcase_modelname}}Attention(self.config, dtype=self.dtype) self.intermediate = Flax{{cookiecutter.camelcase_modelname}}Intermediate(self.config, dtype=self.dtype) self.output = Flax{{cookiecutter.camelcase_modelname}}Output(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = Flax{{cookiecutter.camelcase_modelname}}Attention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs # Copied from 
transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}LayerCollection(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: Flax{{cookiecutter.camelcase_modelname}}CheckpointLayer = remat(Flax{{cookiecutter.camelcase_modelname}}Layer, static_argnums=(5, 6, 7)) self.layers = [ Flax{{cookiecutter.camelcase_modelname}}CheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ Flax{{cookiecutter.camelcase_modelname}}Layer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for \ {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Encoder(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = Flax{{cookiecutter.camelcase_modelname}}LayerCollection(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Pooler(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return self.LayerNorm(hidden_states) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.transform = Flax{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(self.config, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.transform(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) hidden_states += self.bias return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(self.config, dtype=self.dtype) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyNSPHead with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}OnlyNSPHead(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, pooled_output): return self.seq_relationship(pooled_output) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainingHeads with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}PreTrainingHeads(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 
def setup(self): self.predictions = Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(self.config, dtype=self.dtype) self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, hidden_states, pooled_output, shared_embedding=None): prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix = "{{cookiecutter.lowercase_modelname}}" module_class: nn.Module = None def __init__( self, config: {{cookiecutter.camelcase_modelname}}Config, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_weights with Bert->{{cookiecutter.camelcase_modelname}} def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_cache with Bert->{{cookiecutter.camelcase_modelname}} def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.__call__ with Bert->{{cookiecutter.camelcase_modelname}} def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxBertAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->{{cookiecutter.camelcase_modelname}} class Flax{{cookiecutter.camelcase_modelname}}Module(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = Flax{{cookiecutter.camelcase_modelname}}Embeddings(self.config, dtype=self.dtype) self.encoder = Flax{{cookiecutter.camelcase_modelname}}Encoder(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.pooler = Flax{{cookiecutter.camelcase_modelname}}Pooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not 
return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) add_start_docstrings( "The bare {{cookiecutter.camelcase_modelname}} Model transformer outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}Model(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}Module class Flax{{cookiecutter.camelcase_modelname}}ForMaskedLMModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""{{cookiecutter.camelcase_modelname}} Model with a `language modeling` head on top for MLM training. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForMaskedLMModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""{{cookiecutter.camelcase_modelname}} Model with a `language modeling` head on top for CLM training. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, ) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + outputs[2:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC, ) class Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoiceModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoiceModule overwrite_call_docstring( Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForTokenClassificationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForTokenClassificationModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.{{cookiecutter.lowercase_modelname}}( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for autoregressive tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForCausalLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. 
# Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) {# encoder_decoder #} {% else %} import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, unfreeze, freeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...utils import add_start_docstrings, replace_return_docstrings from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, FlaxSeq2SeqQuestionAnsweringModelOutput, FlaxSeq2SeqSequenceClassifierOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}" _CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config" _TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer" {{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" {{cookiecutter.uppercase_modelname}}_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ {{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.roll(input_ids, 1, axis=-1) shifted_input_ids = shifted_input_ids.at[(..., 0)].set(decoder_start_token_id) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class Flax{{cookiecutter.camelcase_modelname}}Attention(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads assert ( self.head_dim * self.num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. 
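        # the cache variables below are created on the very first pass (initialization) and then updated in place
        # on every subsequent decoding step: "cached_key"/"cached_value" hold the accumulated key/value states,
        # while "cache_index" tracks how many positions have been written so far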
is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.q_proj(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: # self_attention key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. 
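        # positions that may be attended to receive a bias of 0.0, while masked positions receive the most
        # negative finite value for the dtype so that they effectively vanish after the softmax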
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class Flax{{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = Flax{{cookiecutter.camelcase_modelname}}Attention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype ) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense( self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Flax{{cookiecutter.camelcase_modelname}}EncoderLayerCollection(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ Flax{{cookiecutter.camelcase_modelname}}EncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers) ] self.layerdrop = self.config.encoder_layerdrop def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = 
all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class Flax{{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = Flax{{cookiecutter.camelcase_modelname}}Attention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype) self.encoder_attn = Flax{{cookiecutter.camelcase_modelname}}Attention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype) self.fc1 = nn.Dense( self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states 
hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class Flax{{cookiecutter.camelcase_modelname}}DecoderLayerCollection(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ Flax{{cookiecutter.camelcase_modelname}}DecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers) ] self.layerdrop = self.config.decoder_layerdrop def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): layer_outputs = (None, None, None) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) class Flax{{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" config: {{cookiecutter.camelcase_modelname}}Config inner_dim: int num_classes: int pooler_dropout: float dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.dropout = nn.Dropout(rate=self.pooler_dropout) self.out_proj = nn.Dense( self.num_classes, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) def __call__(self, hidden_states: jnp.ndarray, deterministic: bool): hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = jnp.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states class Flax{{cookiecutter.camelcase_modelname}}Encoder(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation embed_tokens: Optional[nn.Embed] = None def setup(self): self.dropout_layer = 
nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 if self.embed_tokens is None: self.embed_tokens = nn.Embed( self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) # {{cookiecutter.camelcase_modelname}} is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 self.embed_positions = nn.Embed( self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = Flax{{cookiecutter.camelcase_modelname}}EncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs return FlaxBaseModelOutput( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class Flax{{cookiecutter.camelcase_modelname}}Decoder(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation embed_tokens: Optional[nn.Embed] = None def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 if self.embed_tokens is None: self.embed_tokens = nn.Embed( self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) # {{cookiecutter.camelcase_modelname}} is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 self.embed_positions = nn.Embed( self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = Flax{{cookiecutter.camelcase_modelname}}DecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # embed positions positions = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) class Flax{{cookiecutter.camelcase_modelname}}Module(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.encoder = Flax{{cookiecutter.camelcase_modelname}}Encoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = Flax{{cookiecutter.camelcase_modelname}}Decoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, 
encoder_attentions=encoder_outputs.attentions, ) class Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel(FlaxPreTrainedModel): config_class = {{cookiecutter.camelcase_modelname}}Config base_model_prefix: str = "model" module_class: nn.Module = None def __init__( self, config: {{cookiecutter.camelcase_modelname}}Config, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") # make sure initialization pass will work for Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
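        Example (a minimal sketch; the `inputs` variable and `max_length=64` are illustrative assumptions):

        ```python
        >>> encoder_outputs = model.encode(**inputs)
        >>> past_key_values = model.init_cache(inputs.input_ids.shape[0], max_length=64, encoder_outputs=encoder_outputs)
        ```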
""" # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings({{cookiecutter.uppercase_modelname}}_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class={{cookiecutter.camelcase_modelname}}Config) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np') >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings({{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class={{cookiecutter.camelcase_modelname}}Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np') >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by Flax{{cookiecutter.camelcase_modelname}}Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # prepare decoder inputs if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings( "The bare {{cookiecutter.camelcase_modelname}} Model transformer outputting raw hidden-states without any specific head on top.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}Model(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation module_class = Flax{{cookiecutter.camelcase_modelname}}Module append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}Model, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForConditionalGenerationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables["params"]["shared"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits += self.final_logits_bias.astype(self.dtype) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput( logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( "The {{cookiecutter.uppercase_modelname}} Model with a language modeling head. Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING ) class Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings({{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class={{cookiecutter.camelcase_modelname}}Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, deterministic: bool = True, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np') >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by Flax{{cookiecutter.camelcase_modelname}}Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables["params"]["shared"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias.astype(self.dtype) return lm_logits, outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is 
not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs FLAX_{{cookiecutter.uppercase_modelname}}_CONDITIONAL_GENERATION_DOCSTRING = """ Returns: Summarization example: ```python >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='np') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids']).sequences >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` Mask filling example: ```python >>> import jax >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') >>> TXT = "My friends are <mask> but they eat too many carbs." 
>>> input_ids = tokenizer([TXT], return_tensors='np')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0) >>> values, predictions = jax.lax.top_k(probs, k=1) >>> tokenizer.decode(predictions).split() ``` """ overwrite_call_docstring( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING + FLAX_{{cookiecutter.uppercase_modelname}}_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 num_labels: Optional[int] = None def setup(self): self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype) self.classification_head = Flax{{cookiecutter.camelcase_modelname}}ClassificationHead( config=self.config, inner_dim=self.config.d_model, num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels, pooler_dropout=self.config.classifier_dropout, ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] # last hidden state eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0) # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer: if len(jnp.unique(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") if any(eos_mask.sum(1) == 0): raise ValueError("There are missing <eos> tokens in input_ids") # Ensure to keep 1 only for the last <eos> token for each example eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6 eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0) sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1) logits = self.classification_head(sentence_representation, deterministic=deterministic) if not return_dict: output = (logits,) + outputs[1:] return output return FlaxSeq2SeqSequenceClassifierOutput( logits=logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ {{cookiecutter.camelcase_modelname}} model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. 
for GLUE tasks. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule dtype = jnp.float32 append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqSequenceClassifierOutput, _CONFIG_FOR_DOC, ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule(nn.Module): config: {{cookiecutter.camelcase_modelname}}Config dtype: jnp.dtype = jnp.float32 num_labels = 2 def setup(self): self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense( self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return output return FlaxSeq2SeqQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ {{cookiecutter.uppercase_modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, ) class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel): module_class = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule dtype = jnp.float32 append_call_sample_docstring( Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) {% endif -%}
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py/0
{ "file_path": "transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py", "repo_id": "transformers", "token_count": 60288 }
389
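The `decode()` method in the template above mirrors the cached-decoding API of the existing Flax seq2seq models in the library. A minimal sketch of that encode/init_cache/decode flow, assuming the same public API as `FlaxBartForConditionalGeneration` and using `facebook/bart-base` as a stand-in checkpoint (neither is part of the template itself):

```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxBartForConditionalGeneration

model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-base")
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="np")
encoder_outputs = model.encode(**inputs)

batch_size = inputs["input_ids"].shape[0]
max_length = 8

# build the decoder cache once; decode() then returns an updated cache because
# `past_key_values` is passed (the `mutable=["cache"]` branch in the template)
past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)

decoder_input_ids = jnp.full((batch_size, 1), model.config.decoder_start_token_id, dtype="i4")
decoder_position_ids = jnp.zeros((batch_size, 1), dtype="i4")
decoder_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")

outputs = model.decode(
    decoder_input_ids,
    encoder_outputs,
    decoder_attention_mask=decoder_attention_mask,
    decoder_position_ids=decoder_position_ids,
    past_key_values=past_key_values,
)
next_token = outputs.logits[:, -1].argmax(-1)
past_key_values = outputs.past_key_values  # feed back in for the next decoding step
```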
{ "modelname": "TemplatePT", "uppercase_modelname": "TEMPLATE_PT", "lowercase_modelname": "template_pt", "camelcase_modelname": "TemplatePt", "authors": "The HuggingFace Team", "checkpoint_identifier": "brand-new-bert-base-cased", "tokenizer_type": "Based on BERT", "generate_tensorflow_pytorch_and_flax": "PyTorch", "is_encoder_decoder_model": "False" }
transformers/templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json/0
{ "file_path": "transformers/templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json", "repo_id": "transformers", "token_count": 148 }
390
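The JSON above only supplies values for the `{{cookiecutter.*}}` placeholders scattered through the templates. A rough illustration of the substitution (not the actual cookiecutter machinery, which renders the templates with Jinja2):

```python
import json

config = json.loads("""
{
    "modelname": "TemplatePT",
    "uppercase_modelname": "TEMPLATE_PT",
    "lowercase_modelname": "template_pt",
    "camelcase_modelname": "TemplatePt"
}
""")

template = "class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel):"

# naive string substitution of each placeholder with its configured value
rendered = template
for key, value in config.items():
    rendered = rendered.replace("{{cookiecutter." + key + "}}", value)

print(rendered)
# class TemplatePtModel(TemplatePtPreTrainedModel):
```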
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os import re import sys from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, backend_device_count, execute_subprocess_async, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_accelerator, require_torch_non_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1" MBART_TINY = "sshleifer/tiny-mbart" @require_torch class TestTrainerExt(TestCasePlus): def run_seq2seq_quick( self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ): output_dir = self.run_trainer( eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, ) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history if not do_eval: return eval_metrics = [log for log in logs if "eval_loss" in log.keys()] first_step_stats = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats last_step_stats = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"], float) assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`" @require_torch_non_multi_accelerator def test_run_seq2seq_no_dist(self): self.run_seq2seq_quick() # verify that the trainer can handle non-distributed with n_gpu > 1 @require_torch_multi_accelerator def test_run_seq2seq_dp(self): self.run_seq2seq_quick(distributed=False) # verify that the trainer can handle distributed with n_gpu > 1 @require_torch_multi_accelerator def test_run_seq2seq_ddp(self): self.run_seq2seq_quick(distributed=True) @require_apex @require_torch_gpu def test_run_seq2seq_apex(self): # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. 
# self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") @parameterized.expand(["base", "low", "high", "mixed"]) @require_torch_multi_accelerator def test_trainer_log_level_replica(self, experiment_id): # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout experiments = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } data = experiments[experiment_id] kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} log_info_string = "Running training" with CaptureStderr() as cl: self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"]) n_matches = len(re.findall(log_info_string, cl.err)) self.assertEqual(n_matches, data["n_matches"]) @slow def test_run_seq2seq(self): output_dir = self.run_trainer( eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, ) # Check metrics logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history eval_metrics = [log for log in logs if "eval_loss" in log.keys()] first_step_stats = eval_metrics[0] last_step_stats = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"], float) # test if do_predict saves generations and metrics contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def test_run_seq2seq_bnb(self): from transformers.training_args import OptimizerNames def train_and_return_metrics(optim: str) -> Tuple[int, float]: extra_args = "--skip_memory_metrics 0" output_dir = self.run_trainer( max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, # force run in a new process extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, # to allow deterministic fixed memory usage ) # Check metrics logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20) gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20) loss = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value) gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value) gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb gpu_total_mem_orig = 
gpu_peak_mem_orig + gpu_alloc_mem_orig gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as follows: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings expected_savings = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB", ) self.assertGreater( gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB", ) self.assertEqual( loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def run_trainer( self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ): data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" output_dir = self.get_auto_remove_tmp_dir() args_train = f""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(num_train_epochs)} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(eval_steps)} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() args_eval = f""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(eval_steps)} """.split() args_predict = """ --do_predict """.split() args = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args +=
"--adafactor".split() else: args += f"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: n_gpus_to_use = backend_device_count(torch_device) master_port = get_torch_dist_unique_port() distributed_args = f""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() cmd = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) else: testargs = ["run_translation.py"] + args with patch.object(sys, "argv", testargs): main() return output_dir
transformers/tests/extended/test_trainer_ext.py/0
{ "file_path": "transformers/tests/extended/test_trainer_ext.py", "repo_id": "transformers", "token_count": 6361 }
391
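The ~150MB saving asserted in `test_run_seq2seq_bnb` follows directly from the parameter counts quoted in its comments; a quick back-of-the-envelope check (numbers taken from those comments, not re-measured):

```python
quantizable_params = 25_000_000  # 54M total minus ~29M nn.Embedding params that stay in fp32
bytes_per_param_adamw = 8        # regular AdamW keeps two fp32 optimizer states per parameter
bytes_per_param_bnb = 2          # 8-bit (bnb) optimizer states

adamw_mb = quantizable_params * bytes_per_param_adamw / 2**20  # ~191 MB
bnb_mb = quantizable_params * bytes_per_param_bnb / 2**20      # ~48 MB

print(f"expected optimizer-state saving: ~{adamw_mb - bnb_mb:.0f} MB")  # ~143 MB; the test asserts > 120 MB
```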
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask @require_flax class FlaxGenerationTesterMixin: model_tester = None all_generative_model_classes = () def _get_input_ids_and_config(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = inputs["input_ids"].shape[-1] // 2 input_ids = inputs["input_ids"][:max_batch_size, :sequence_length] attention_mask = jnp.ones_like(input_ids) attention_mask = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens max_length = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def test_greedy_generate_pt_fx(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.decoder_start_token_id = 0 for model_class in self.all_generative_model_classes: flax_model = model_class(config) pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params) flax_generation_outputs = flax_model.generate(input_ids).sequences pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist()) def test_greedy_generate(self): config, input_ids, _, 
max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_num_return_sequences(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 config.num_return_sequences = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences) def test_sample_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length config.temperature = 0.8 config.top_k = 10 config.top_p = 0.3 config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.num_beams = 2 config.min_length = 1 config.forced_bos_token_id = 8 
config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.num_beams = 2 config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) @require_flax class FlaxGenerationIntegrationTests(unittest.TestCase): def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
transformers/tests/generation/test_flax_utils.py/0
{ "file_path": "transformers/tests/generation/test_flax_utils.py", "repo_id": "transformers", "token_count": 5082 }
392
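The jit/eager equivalence pattern these tests rely on can be reproduced outside the test harness. A minimal sketch, assuming the `gpt2` checkpoint's Flax weights are available and that the model exposes a `generation_config` to configure greedy decoding:

```python
import jax
from transformers import AutoTokenizer, FlaxGPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = FlaxGPT2LMHeadModel.from_pretrained("gpt2")

# gpt2 has no pad token; fall back to eos, as _get_input_ids_and_config does above
model.generation_config.pad_token_id = model.generation_config.eos_token_id
model.generation_config.max_length = 16
model.generation_config.do_sample = False

input_ids = tokenizer("Hello world", return_tensors="np").input_ids

sequences = model.generate(input_ids).sequences

# jit-compiling generate should produce exactly the same tokens as eager execution
jit_generate = jax.jit(model.generate)
jit_sequences = jit_generate(input_ids).sequences

assert sequences.tolist() == jit_sequences.tolist()
```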
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class AlignProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = AlignProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, BertTokenizer) self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor) self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor) def test_save_load_pretrained_additional_features(self): processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = AlignProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, BertTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, padding="max_length", max_length=64) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = 
processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/align/test_processor_align.py/0
{ "file_path": "transformers/tests/models/align/test_processor_align.py", "repo_id": "transformers", "token_count": 3309 }
393
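Outside the fixture-based tests above, the same merging behaviour can be seen with a public checkpoint; a short sketch, assuming `kakaobrain/align-base` as the pretrained processor:

```python
import numpy as np
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")

# random RGB image as a stand-in input
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="np")

# tokenizer and image-processor outputs are merged into a single BatchEncoding
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']
```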
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BEiT model. """ import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_BACKBONE_MAPPING, MODEL_MAPPING, BeitBackbone, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class BeitModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[1, 2, 3, 4], out_features=["stage1", "stage2", "stage3", "stage4"], ): self.parent = parent self.vocab_size = vocab_size self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.out_features = out_features self.num_labels = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, 
labels, pixel_labels def get_config(self): return BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, out_features=self.out_features, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = BeitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_backbone(self, config, pixel_values, labels, pixel_labels): model = BeitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) expected_height = expected_width = self.image_size // config.patch_size self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width] ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = BeitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width] ) # verify channels self.parent.assertEqual(len(model.channels), 1) def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels): model = BeitForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = BeitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = BeitForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = BeitForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = BeitModelTester(self) self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds") def test_inputs_embeds(self): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="BEiT does not support feedforward chunking yet") def test_feed_forward_chunking(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [ *get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING), BeitForMaskedImageModeling, ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling 
if ( model_class in [*get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BeitModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class BeitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None @slow def test_inference_masked_image_modeling_head(self): model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device) image_processor = self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device) # prepare bool_masked_pos bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 196, 8192)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2)) @slow def test_inference_image_classification_head_imagenet_1k(self): model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([-1.2385, -1.0987, 
-1.0108]).to(torch_device) self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) expected_class_idx = 281 self.assertEqual(logits.argmax(-1).item(), expected_class_idx) @slow def test_inference_image_classification_head_imagenet_22k(self): model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21841)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device) self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) expected_class_idx = 2396 self.assertEqual(logits.argmax(-1).item(), expected_class_idx) @slow def test_inference_semantic_segmentation(self): model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") model = model.to(torch_device) image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False) ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = Image.open(ds[0]["file"]) inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 150, 160, 160)) self.assertEqual(logits.shape, expected_shape) is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0") if is_pillow_less_than_9: expected_slice = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ], device=torch_device, ) else: expected_slice = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_post_processing_semantic_segmentation(self): model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") model = model.to(torch_device) image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False) ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = Image.open(ds[0]["file"]) inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((160, 160)) self.assertEqual(segmentation[0].shape, expected_shape) @require_torch class BeitBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (BeitBackbone,) if is_torch_available() else () config_class = 
BeitConfig

    def setUp(self):
        self.model_tester = BeitModelTester(self)
transformers/tests/models/beit/test_modeling_beit.py/0
{ "file_path": "transformers/tests/models/beit/test_modeling_beit.py", "repo_id": "transformers", "token_count": 9540 }
394
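The BEiT record above ends with integration tests that load real checkpoints and compare logits against hard-coded slices. The following is a minimal inference sketch of the image-classification path those tests exercise, not part of the dataset record itself: it assumes network access to the Hub, reuses the `microsoft/beit-base-patch16-224` checkpoint and the expected class index 281 from the test, and borrows the COCO cats image URL used elsewhere in these test files.

# Hedged sketch of the path covered by BeitModelIntegrationTest above.
# Assumes Hub access; checkpoint name and expected class come from the test.
import requests
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # two cats on a couch
image = Image.open(requests.get(url, stream=True).raw)

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
model.eval()

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# The integration test expects class index 281 ("tabby cat") for this image.
predicted_idx = logits.argmax(-1).item()
print(predicted_idx, model.config.id2label[predicted_idx])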
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BlenderbotSmall model. """ import tempfile import unittest from transformers import BlenderbotSmallConfig, is_torch_available from transformers.testing_utils import ( require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallTokenizer from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallDecoder, BlenderbotSmallEncoder, BlenderbotSmallForCausalLM, ) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class BlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id # forcing a certain token to be 
generated, sets all other tokens to -inf # if however the token to be generated is already at -inf then it can lead token # `nan` values and thus break generation self.forced_bos_token_id = None self.forced_eos_token_id = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, forced_bos_token_id=self.forced_bos_token_id, forced_eos_token_id=self.forced_eos_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BlenderbotSmallModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = BlenderbotSmallModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = BlenderbotSmallEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] 
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BlenderbotSmallDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotSmallModel, BlenderbotSmallForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (BlenderbotSmallForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": BlenderbotSmallForConditionalGeneration, "feature-extraction": BlenderbotSmallModel, "summarization": BlenderbotSmallForConditionalGeneration, "text-generation": BlenderbotSmallForCausalLM, "text2text-generation": BlenderbotSmallForConditionalGeneration, "translation": BlenderbotSmallForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") def setUp(self): self.model_tester = BlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotSmallForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), 
atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) @require_torch class Blenderbot90MIntegrationTests(unittest.TestCase): ckpt = "facebook/blenderbot-90M" @cached_property def model(self): model = BlenderbotSmallForConditionalGeneration.from_pretrained(self.ckpt).to(torch_device) if torch_device == "cuda": model = model.half() return model @cached_property def tokenizer(self): return BlenderbotSmallTokenizer.from_pretrained(self.ckpt) @slow def test_90_generation_from_long_input(self): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel" " like i'm going to throw up.\nand why is that?" ] model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device) assert isinstance(self.tokenizer, BlenderbotSmallTokenizer) generated_ids = self.model.generate(**model_inputs)[0] reply = self.tokenizer.decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) assert reply in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", ) @slow def test_90_generation_from_short_input(self): model_inputs = self.tokenizer(["sam"], return_tensors="pt").to(torch_device) generated_utterances = self.model.generate(**model_inputs) clean_txt = self.tokenizer.decode( generated_utterances[0], skip_special_tokens=True, clean_up_tokenization_spaces=True ) assert clean_txt in ( "have you ever been to a sam club? it's a great club in the south.", "have you ever heard of sam harris? he's an american singer, songwriter, and actor.", ) class BlenderbotSmallStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if 
self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BlenderbotSmallDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BlenderbotSmallDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() 
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class BlenderbotSmallStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotSmallDecoder, BlenderbotSmallForCausalLM) if is_torch_available() else () all_generative_model_classes = (BlenderbotSmallForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = BlenderbotSmallStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass
transformers/tests/models/blenderbot_small/test_modeling_blenderbot_small.py/0
{ "file_path": "transformers/tests/models/blenderbot_small/test_modeling_blenderbot_small.py", "repo_id": "transformers", "token_count": 10061 }
395
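As a hedged companion to the Blenderbot90MIntegrationTests in the record above, this is a minimal generation sketch using the same `facebook/blenderbot-90M` checkpoint and tokenizer classes named in the test; it assumes Hub access and uses a shortened version of the test's dialogue prompt.

# Hedged sketch mirroring Blenderbot90MIntegrationTests above.
import torch
from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

ckpt = "facebook/blenderbot-90M"
tokenizer = BlenderbotSmallTokenizer.from_pretrained(ckpt)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(ckpt)
model.eval()

src_text = ["Social anxiety\nWow, I am never shy. Do you have anxiety?"]
model_inputs = tokenizer(src_text, return_tensors="pt")

with torch.no_grad():
    generated_ids = model.generate(**model_inputs)

# Decode the first (and only) generated sequence into a reply string.
reply = tokenizer.decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(reply)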
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import BloomConfig, BloomTokenizerFast, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax.numpy as jnp from transformers import FlaxBloomForCausalLM, FlaxBloomModel def prepare_bloom_inputs_dict(config, input_ids, attention_mask=None): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_flax class FlaxBloomModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, n_layer=2, n_head=4, hidden_act="gelu", hidden_dropout=0.1, attention_probs_dropout_prob=0.1, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, apply_residual_connection_post_layernorm=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = n_layer self.num_attention_heads = n_head self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_probs_dropout_prob = attention_probs_dropout_prob self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range self.is_encoder_decoder = False self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) config = BloomConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_probs_dropout_prob, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=False, use_cache=False, ) inputs_dict = prepare_bloom_inputs_dict(config, input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_length = 20 model = 
model_class_name(config) input_ids = inputs_dict["input_ids"] attention_mask = jnp.ones((input_ids.shape[0], max_length), dtype="i4") past_key_values = model.init_cache(input_ids.shape[0], max_length) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, ) outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_length = 20 model = model_class_name(config) input_ids, attention_mask = ( inputs_dict["input_ids"], inputs_dict["attention_mask"], ) attention_mask_cache = jnp.concatenate( [ attention_mask, jnp.zeros((attention_mask.shape[0], max_length - attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_length) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxBloomModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): all_model_classes = (FlaxBloomModel, FlaxBloomForCausalLM) if is_flax_available() else () all_generative_model_classes = () if is_flax_available() else () def setUp(self): self.model_tester = FlaxBloomModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("bigscience/bloom-560m") input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @slow @require_flax class FlaxBloomGenerationTest(unittest.TestCase): all_model_classes = (FlaxBloomForCausalLM,) if is_flax_available() else () all_generative_model_classes = () if is_flax_available() else () def setUp(self): self.model_id = "bigscience/bloom-560m" self.tokenizer = BloomTokenizerFast.from_pretrained(self.model_id, padding_side="left") self.model_tester = FlaxBloomModelTester(self) self.model = FlaxBloomForCausalLM.from_pretrained(self.model_id, from_pt=True, revision="gs555750") def test_model_batched_gen(self): # tests if the model outputs the same generation for the same batched input input_sentences = [ "Hello there is this string is definitely longer I believe that", "Hello there is this string is definitely longer I believe that", ] inputs = self.tokenizer(input_sentences, return_tensors="np", padding=True, truncation=True) sequences_fx = self.model.generate(**inputs, max_length=20).sequences self.assertEqual(sequences_fx[0].tolist(), 
sequences_fx[1].tolist()) def test_model_batched_padding_left(self): # tests if the model outputs the same generation for an input that is part of a batch # and a single input input_sentences_batch = [ "Hello there is this string is definitely longer I believe that", "Hi I want to order", ] inputs = self.tokenizer(input_sentences_batch, return_tensors="np", padding=True, truncation=True) sequences_fx_batch = self.model.generate(**inputs, max_length=20).sequences input_sentence_simple = "Hi I want to order" inputs_simple = self.tokenizer(input_sentence_simple, return_tensors="np") sequences_fx_simple = self.model.generate(**inputs_simple, max_length=20).sequences self.assertEqual(sequences_fx_batch[1][6:].tolist(), sequences_fx_simple[0][:-6].tolist()) def test_batch_generated_text(self): input_sentences = [ "Hello what is", "Running a quick test with the", ] inputs = self.tokenizer(input_sentences, return_tensors="np", padding=True, truncation=True) generated_ids = self.model.generate(**inputs, max_length=20).sequences generated_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) # these generations match those of the PyTorch model, ensuring correctness EXPECTED_GENERATIONS = [ "Hello what is the best way to get the data from the server? I have tried", "Running a quick test with the following command:\nsudo apt-get install python3\nsudo apt-get install python2", ] self.assertListEqual(generated_text, EXPECTED_GENERATIONS)
transformers/tests/models/bloom/test_modeling_flax_bloom.py/0
{ "file_path": "transformers/tests/models/bloom/test_modeling_flax_bloom.py", "repo_id": "transformers", "token_count": 4307 }
396
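The Flax Bloom record above checks batched, left-padded generation against fixed reference strings. Here is a minimal sketch of that same call pattern, assuming JAX/Flax are installed and the Hub is reachable; the model id, revision, padding side, and prompts are taken directly from FlaxBloomGenerationTest.

# Hedged sketch mirroring FlaxBloomGenerationTest above.
from transformers import BloomTokenizerFast, FlaxBloomForCausalLM

model_id = "bigscience/bloom-560m"
tokenizer = BloomTokenizerFast.from_pretrained(model_id, padding_side="left")
model = FlaxBloomForCausalLM.from_pretrained(model_id, from_pt=True, revision="gs555750")

# Left padding keeps the prompt tokens adjacent to the generated continuation.
inputs = tokenizer(
    ["Hello what is", "Running a quick test with the"],
    return_tensors="np",
    padding=True,
    truncation=True,
)
sequences = model.generate(**inputs, max_length=20).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))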
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch CLIPSeg model. """ import inspect import os import tempfile import unittest import numpy as np import requests import transformers from transformers import MODEL_MAPPING, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import ( is_flax_available, is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel from transformers.models.clipseg.modeling_clipseg import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) class CLIPSegVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPSegVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = CLIPSegVisionModel(config=config) model.to(torch_device) model.eval() with 
torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIPSeg does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (CLIPSegVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = CLIPSegVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPSegVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class CLIPSegTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, 
use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPSegTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = CLIPSegTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = CLIPSegTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPSegTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class CLIPSegModelTester: def __init__( self, parent, text_kwargs=None, vision_kwargs=None, is_training=True, # This should respect the `num_hidden_layers` in `CLIPSegVisionModelTester` extract_layers=(1,), ): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = CLIPSegTextModelTester(parent, **text_kwargs) self.vision_model_tester = CLIPSegVisionModelTester(parent, **vision_kwargs) self.is_training = is_training self.extract_layers = extract_layers def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return CLIPSegConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64, reduce_dim=32, extract_layers=self.extract_layers, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = CLIPSegModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def create_and_check_model_for_image_segmentation(self, config, input_ids, attention_maks, pixel_values): model = CLIPSegForImageSegmentation(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values) self.parent.assertEqual( result.logits.shape, ( self.vision_model_tester.batch_size, self.vision_model_tester.image_size, self.vision_model_tester.image_size, ), ) self.parent.assertEqual( result.conditional_embeddings.shape, (self.text_model_tester.batch_size, config.projection_dim) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # CLIPSegForImageSegmentation requires special treatment if 
return_labels: if model_class.__name__ == "CLIPSegForImageSegmentation": batch_size, _, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [batch_size, height, width], device=torch_device, dtype=torch.float ) return inputs_dict def setUp(self): self.model_tester = CLIPSegModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPSegModel does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # override as the some parameters require custom initialization def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if "logit_scale" in name: self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "film" in name or "transposed_conv" in name or "reduce" in name: # those parameters use PyTorch' default nn.Linear initialization scheme pass else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CLIPSeg needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: 
self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save CLIPSegConfig and check if we can load CLIPSegVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = CLIPSegVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save CLIPSegConfig and check if we can load CLIPSegTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = CLIPSegTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) # overwrite from common since FlaxCLIPSegModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return fx_model_class = getattr(transformers, fx_model_class_name) # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # overwrite from common since FlaxCLIPSegModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # load corresponding PyTorch class pt_model = model_class(config).eval() # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return fx_model_class = getattr(transformers, fx_model_class_name) # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) for k, v in inputs.items(): print(k, v.shape) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPSegModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class CLIPSegModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation(self): model_name = "CIDAS/clipseg-rd64-refined" processor = CLIPSegProcessor.from_pretrained(model_name) model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(torch_device) image = prepare_img() texts = ["a cat", "a remote", "a blanket"] inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the predicted masks self.assertEqual( outputs.logits.shape, torch.Size((3, 352, 352)), ) expected_masks_slice = torch.tensor( [[-7.4613, -7.4785, -7.3628], [-7.3268, -7.0899, -7.1333], [-6.9838, -6.7900, -6.8913]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3)) # verify conditional 
and pooled output
        expected_conditional = torch.tensor([0.5601, -0.0314, 0.1980]).to(torch_device)
        expected_pooled_output = torch.tensor([0.5036, -0.2681, -0.2644]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3))
        self.assertTrue(torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3))
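For quick reference, a minimal standalone sketch of the zero-shot segmentation flow that the integration test above exercises. The checkpoint name, image URL and prompts are taken from that test; the final sigmoid step is an assumption about typical post-processing and is not something the test asserts.

import requests
import torch
from PIL import Image

from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

model_name = "CIDAS/clipseg-rd64-refined"
processor = CLIPSegProcessor.from_pretrained(model_name)
model = CLIPSegForImageSegmentation.from_pretrained(model_name)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompts = ["a cat", "a remote", "a blanket"]

# One copy of the image per text prompt; padding aligns the tokenized prompts.
inputs = processor(text=prompts, images=[image] * len(prompts), padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One (352, 352) logit map per prompt; sigmoid turns the logits into soft masks.
masks = outputs.logits.sigmoid()
print(masks.shape)  # torch.Size([3, 352, 352])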
transformers/tests/models/clipseg/test_modeling_clipseg.py/0
{ "file_path": "transformers/tests/models/clipseg/test_modeling_clipseg.py", "repo_id": "transformers", "token_count": 14630 }
397
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ConvBERT model. """ import os import tempfile import unittest from transformers import ConvBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.convbert.modeling_convbert import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST class ConvBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, 
input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ConvBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ConvBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( 
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = ConvBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConvBertModel, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ConvBertModel, "fill-mask": ConvBertForMaskedLM, "question-answering": ConvBertForQuestionAnswering, "text-classification": ConvBertForSequenceClassification, "token-classification": ConvBertForTokenClassification, "zero-shot": ConvBertForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_head_masking = False def setUp(self): self.model_tester = ConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ConvBertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) 
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # ConvBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == ConvBertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) def test_model_for_input_embeds(self): batch_size = 2 seq_length = 10 inputs_embeds = torch.rand([batch_size, seq_length, 768], device=torch_device) config = self.model_tester.get_config() model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(inputs_embeds=inputs_embeds) self.assertEqual(result.last_hidden_state.shape, (batch_size, seq_length, config.hidden_size)) def test_reducing_attention_heads(self): config, *inputs_dict = self.model_tester.prepare_config_and_inputs() config.head_ratio = 4 self.model_tester.create_and_check_for_masked_lm(config, *inputs_dict) @require_torch class ConvBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = ConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
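A minimal sketch of the bare-encoder inference that ConvBertModelIntegrationTest.test_inference_no_head above asserts; the checkpoint name and dummy input ids are copied from the test, and the shape comment mirrors its expectation.

import torch

from transformers import ConvBertModel

model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
model.eval()

input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]])
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]

print(last_hidden_state.shape)  # torch.Size([1, 6, 768])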
transformers/tests/models/convbert/test_modeling_convbert.py/0
{ "file_path": "transformers/tests/models/convbert/test_modeling_convbert.py", "repo_id": "transformers", "token_count": 9532 }
398
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import CTRLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.modeling_tf_utils import keras from transformers.models.ctrl.modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, ) class TFCTRLModelTester(object): def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = CTRLConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, dff=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFCTRLModel(config=config) 
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, None, input_mask] # None is the input for 'past' result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_ctrl_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFCTRLLMHeadModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_ctrl_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) inputs = { "input_ids": input_ids, "token_type_ids": token_type_ids, "labels": sequence_labels, } model = TFCTRLForSequenceClassification(config) result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFCTRLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFCTRLModel, TFCTRLLMHeadModel, TFCTRLForSequenceClassification) if is_tf_available() else () all_generative_model_classes = (TFCTRLLMHeadModel,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFCTRLModel, "text-classification": TFCTRLForSequenceClassification, "text-generation": TFCTRLLMHeadModel, "zero-shot": TFCTRLForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def setUp(self): self.model_tester = TFCTRLModelTester(self) self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_ctrl_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*config_and_inputs) def test_ctrl_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_lm_head(*config_and_inputs) def test_ctrl_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_for_sequence_classification(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() list_lm_models = [TFCTRLLMHeadModel] list_other_models_with_output_ebd = [TFCTRLForSequenceClassification] for model_class in self.all_model_classes: model = model_class(config) model.build_in_name_scope() # may be needed for the get_bias() call below assert isinstance(model.get_input_embeddings(), keras.layers.Layer) if model_class in list_lm_models: x = model.get_output_embeddings() assert isinstance(x, keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) elif model_class in list_other_models_with_output_ebd: x = model.get_output_embeddings() assert isinstance(x, keras.layers.Layer) name = model.get_bias() assert name is None else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None @slow def test_model_from_pretrained(self): for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCTRLModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFCTRLModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_ctrl(self): model = TFCTRLLMHeadModel.from_pretrained("Salesforce/ctrl") input_ids = tf.convert_to_tensor([[11859, 0, 1611, 8]], dtype=tf.int32) # Legal the president is expected_output_ids = [ 11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
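A minimal sketch of the greedy generation path covered by TFCTRLModelLanguageGenerationTest.test_lm_generate_ctrl above; the checkpoint name and the token ids for "Legal the president is" come straight from that test.

import tensorflow as tf

from transformers import TFCTRLLMHeadModel

model = TFCTRLLMHeadModel.from_pretrained("Salesforce/ctrl")

# Token ids for "Legal the president is", as in the test above.
input_ids = tf.convert_to_tensor([[11859, 0, 1611, 8]], dtype=tf.int32)

# Greedy decoding; the test expects the continuation to begin with
# "a good guy and I don't want to lose my job. ..."
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].numpy().tolist())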
transformers/tests/models/ctrl/test_modeling_tf_ctrl.py/0
{ "file_path": "transformers/tests/models/ctrl/test_modeling_tf_ctrl.py", "repo_id": "transformers", "token_count": 4926 }
399
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import DebertaV2Config, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaV2ForMaskedLM, TFDebertaV2ForMultipleChoice, TFDebertaV2ForQuestionAnswering, TFDebertaV2ForSequenceClassification, TFDebertaV2ForTokenClassification, TFDebertaV2Model, ) class TFDebertaV2ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.relative_attention = relative_attention self.position_biased_input = position_biased_input self.pos_att_type = pos_att_type self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = DebertaV2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, 
intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaV2Model(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaV2ForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDebertaV2ForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDebertaV2ForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaV2ForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFDebertaV2ForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, 
(self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDebertaV2Model, TFDebertaV2ForMaskedLM, TFDebertaV2ForQuestionAnswering, TFDebertaV2ForMultipleChoice, TFDebertaV2ForSequenceClassification, TFDebertaV2ForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFDebertaV2Model, "fill-mask": TFDebertaV2ForMaskedLM, "question-answering": TFDebertaV2ForQuestionAnswering, "text-classification": TFDebertaV2ForSequenceClassification, "token-classification": TFDebertaV2ForTokenClassification, "zero-shot": TFDebertaV2ForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDebertaV2ModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge") self.assertIsNotNone(model) @require_tf class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase): @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass @slow def test_inference_no_head(self): model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge") input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_slice = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
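A minimal sketch of the no-head inference asserted by TFDeBERTaV2ModelIntegrationTest.test_inference_no_head above; the checkpoint name, input ids and attention mask are taken from the test.

import tensorflow as tf

from transformers import TFDebertaV2Model

model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")

input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

# Index [0] is the last hidden state: (batch_size, sequence_length, hidden_size).
output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)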
transformers/tests/models/deberta_v2/test_modeling_tf_deberta_v2.py/0
{ "file_path": "transformers/tests/models/deberta_v2/test_modeling_tf_deberta_v2.py", "repo_id": "transformers", "token_count": 5454 }
400
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Donut Swin model. """ import collections import unittest from transformers import DonutSwinConfig from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import DonutSwinModel from transformers.models.donut.modeling_donut_swin import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST class DonutSwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DonutSwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, 
encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = DonutSwinModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DonutSwinModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": DonutSwinModel} if is_torch_available() else {} fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DonutSwinModelTester(self) self.config_tester = ConfigTester(self, config_class=DonutSwinConfig, embed_dim=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_inputs_embeds(self): # DonutSwin does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = 
True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # DonutSwin has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True 
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DonutSwinModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
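A minimal sketch of the shape bookkeeping that DonutSwinModelTester.create_and_check_model above verifies, using a small randomly initialized config; every hyperparameter value here is illustrative and mirrors the tester defaults rather than any released checkpoint.

import torch

from transformers import DonutSwinConfig, DonutSwinModel

config = DonutSwinConfig(
    image_size=32,
    patch_size=2,
    num_channels=3,
    embed_dim=16,
    depths=[1, 2, 1],
    num_heads=[2, 2, 4],
    window_size=2,
)
model = DonutSwinModel(config)
model.eval()

pixel_values = torch.rand(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)

# The sequence shrinks by 4x per patch-merging stage while the hidden width doubles.
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
print(outputs.last_hidden_state.shape)  # (1, 16, 64) == (1, expected_seq_len, expected_dim)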
transformers/tests/models/donut/test_modeling_donut_swin.py/0
{ "file_path": "transformers/tests/models/donut/test_modeling_donut_swin.py", "repo_id": "transformers", "token_count": 6320 }
401
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_torchvision_available(): import torchvision.transforms as transforms if is_vision_available(): from PIL import Image from transformers import IdeficsImageProcessor class IdeficsImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], ): size = size if size is not None else {"shortest_edge": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution # self.size = size self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "image_size": self.image_size, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to IdeficsImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: size = self.image_size image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return (self.num_channels, height, width) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class IdeficsImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = IdeficsImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = IdeficsImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "image_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertNotEqual(image_processor.image_size, 30) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, image_size=42) self.assertEqual(image_processor.image_size, 42) @require_torchvision def test_torchvision_numpy_transforms_equivalency(self): # as we had to reimplement the torchvision transforms using transformers utils we must check # they both do the same image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) image_processor = self.image_processing_class(**self.image_processor_dict) print(image_inputs) def convert_to_rgb(image): # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background # for transparent images. 
The call to `alpha_composite` handles this case if image.mode == "RGB": return image image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") return alpha_composite image_size = image_processor.image_size image_mean = image_processor.image_mean image_std = image_processor.image_std transform = transforms.Compose( [ convert_to_rgb, transforms.Resize((image_size, image_size), interpolation=transforms.InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize(mean=image_mean, std=image_std), ] ) pixel_values_transform_implied = image_processor(image_inputs, transform=None) pixel_values_transform_supplied = image_processor(image_inputs, transform=transform) torch.testing.assert_close(pixel_values_transform_implied, pixel_values_transform_supplied, rtol=0.0, atol=0.0) @unittest.skip("not supported") def test_call_numpy(self): pass @unittest.skip("not supported") def test_call_numpy_4_channels(self): pass @unittest.skip("not supported") def test_call_pil(self): pass @unittest.skip("not supported") def test_call_pytorch(self): pass
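A standalone sketch of the torchvision pipeline that test_torchvision_numpy_transforms_equivalency above compares the Idefics image processor against; the mean/std values are the tester defaults, while the 224 image size and the random input image are illustrative assumptions.

import numpy as np
import torchvision.transforms as transforms
from PIL import Image

image_size = 224  # illustrative; the test reads this off the processor instance
image_mean = [0.48145466, 0.4578275, 0.40821073]
image_std = [0.26862954, 0.26130258, 0.27577711]


def convert_to_rgb(image):
    # Composite transparent images onto a white background before dropping alpha.
    if image.mode == "RGB":
        return image
    image_rgba = image.convert("RGBA")
    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
    return Image.alpha_composite(background, image_rgba).convert("RGB")


transform = transforms.Compose(
    [
        convert_to_rgb,
        transforms.Resize((image_size, image_size), interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=image_mean, std=image_std),
    ]
)

image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
pixel_values = transform(image)
print(pixel_values.shape)  # torch.Size([3, 224, 224])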
transformers/tests/models/idefics/test_image_processing_idefics.py/0
{ "file_path": "transformers/tests/models/idefics/test_image_processing_idefics.py", "repo_id": "transformers", "token_count": 3182 }
402
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest import numpy as np import pytest import requests from transformers.testing_utils import ( get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, require_vision, ) from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, CLIPImageProcessor, Kosmos2Processor, PreTrainedTokenizerFast, XLMRobertaTokenizer, XLMRobertaTokenizerFast, ) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers @require_vision class Kosmos2ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor() # We have a SentencePiece fixture for testing slow_tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB) fast_tokenizer = XLMRobertaTokenizerFast(__slow_tokenizer=slow_tokenizer) processor = Kosmos2Processor(image_processor, fast_tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = Kosmos2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Kosmos2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_processor = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_processor.keys(): self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" encoded_processor = processor(text=input_str, add_eos_token=True) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() # both image and text inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # only text inputs = processor(text=input_str) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) # only image inputs = 
processor(images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values"]) @require_torch def test_full_processor(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg" processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224") # test with different input formats. # fmt: off texts = [ # no phrase "<grounding> Two puppies sit in a field of grass.", # 1 phrase "<grounding> <phrase> Two puppies </phrase> sit in a field of grass.", # 2 phrases "<grounding> <phrase> Two puppies </phrase> sit in a field of <phrase> grass </phrase>.", # 2 phrases: bboxes already specified for the 1st phrase "<grounding> <phrase> Two puppies </phrase> <object> <patch_index_0079> <patch_index_1016> </delimiter_of_multi_objects/> <patch_index_0135> <patch_index_1008> </object> sit in a field of <phrase> grass </phrase>.", ] # fmt: on image = Image.open(requests.get(url, stream=True).raw) # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed image_path = os.path.join(self.tmpdirname, "image.jpg") image.save(image_path) image = Image.open(image_path) # fmt: off bboxes = [ [None, []], [[None], [[]], [(79, 1016)], [[(79, 1016)]], [[(79, 1016), (135, 1008)]]], [[[(79, 1016), (135, 1008)], None], [[(79, 1016), (135, 1008)], []], [[(79, 1016), (135, 1008)], (480, 1023)], [[(79, 1016), (135, 1008)], [(480, 1023)]]], [[None, [(480, 1023)]]], ] # fmt: on batch_image = [image] * 4 batch_text = [texts[0], texts[1], texts[1], texts[2]] batch_bboxes = [ None, # no phrase [[]], # 1 phrase: no bbox [(79, 1016)], # 1 phrase: 1 bbox [[(79, 1016), (135, 1008)], (480, 1023)], # 2 phrase: 2 bboxes + 1 bbox ] # fmt: off expected_input_ids = [ [0, 64012, 1264, 17772, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 106, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 64009, 64493, 65036, 64010, 106, 4, 2], ] # fmt: on EXPECTED_PIXEL_VALUES_1 = np.array( [ [ [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6243883967399597, -0.6243883967399597, -0.5951915383338928], ], [ [-0.20629698038101196, -0.19128920137882233, -0.19128920137882233], [-0.20629698038101196, -0.19128920137882233, -0.17628143727779388], [-0.2213047444820404, -0.20629698038101196, -0.16127367317676544], ], [ [-0.5843556523323059, -0.5701355338096619, -0.5701355338096619], [-0.5843556523323059, -0.5701355338096619, -0.5559154152870178], [-0.5843556523323059, -0.5559154152870178, -0.5416953563690186], ], ] ) EXPECTED_PIXEL_VALUES_2 = np.array( [ [ [-0.4346088469028473, -0.47840413451194763, -0.7849710583686829], [-0.5221993923187256, -0.5076009631156921, -0.755774199962616], [-0.5221993923187256, -0.5076009631156921, -0.7411757707595825], ], [ [-0.2813358008861542, -0.2963435649871826, -0.431413471698761], [-0.26632803678512573, -0.2963435649871826, -0.4764367938041687], [-0.2213047444820404, -0.2813358008861542, -0.49144455790519714], ], [ [-0.5701355338096619, 
-0.641235888004303, -0.7549964189529419], [-0.5843556523323059, -0.641235888004303, -0.7834365367889404], [-0.5559154152870178, -0.641235888004303, -0.7834365367889404], ], ] ) def check(texts, bboxes, expected_input_ids): outputs = processor(images=None, text=texts, bboxes=bboxes, add_eos_token=True) self.assertListEqual(outputs.input_ids, expected_input_ids) # no phrase check(texts[0], bboxes[0][0], expected_input_ids[0]) # no phrase check(texts[0], bboxes[0][1], expected_input_ids[0]) # 1 phrase: no bbox check(texts[1], bboxes[1][0], expected_input_ids[1]) # 1 phrase: no bbox check(texts[1], bboxes[1][1], expected_input_ids[1]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][2], expected_input_ids[2]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][3], expected_input_ids[2]) # 1 phrase: 2 bboxes check(texts[1], bboxes[1][4], expected_input_ids[3]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[1], bboxes=[[None]]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][0], expected_input_ids[4]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][1], expected_input_ids[4]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][2], expected_input_ids[5]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][3], expected_input_ids[5]) # 2 phrase: no box (as already specified in the text) + 1 bbox check(texts[3], bboxes[3][0], expected_input_ids[5]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[2], bboxes=[[(79, 1016), (135, 1008)], [None]]) # test batch outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, add_eos_token=True, ) self.assertListEqual( outputs.input_ids, [expected_input_ids[0], expected_input_ids[1], expected_input_ids[2], expected_input_ids[5]], ) # test batch with padding (without `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask[-1], [1] * len(expected_input_ids[5])) # test batch with padding (with `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids.numpy().tolist()[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask.numpy().tolist()[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], [1] * len(expected_input_ids[5])) # test with image num_image_tokens = 64 outputs = processor(images=image, text=texts[0], bboxes=None, add_eos_token=True) self.assertTupleEqual(outputs.pixel_values[0].shape, (3, 224, 224)) self.assertListEqual( outputs.input_ids, [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + 
expected_input_ids[0][1:], ) self.assertListEqual( outputs.image_embeds_position_mask, [0] * 2 + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[0]) - 1), ) np.testing.assert_allclose(outputs.pixel_values[0][:3, :3, :3], EXPECTED_PIXEL_VALUES_1, atol=1e-9) np.testing.assert_allclose(outputs.pixel_values[0][:3, -3:, -3:], EXPECTED_PIXEL_VALUES_2, atol=1e-9) # test with image in batch (right padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) self.assertTupleEqual(outputs.pixel_values.shape, (4, 3, 224, 224)) np.testing.assert_allclose( outputs.pixel_values[:, :3, :3, :3].numpy(), [EXPECTED_PIXEL_VALUES_1] * len(batch_image), atol=1e-9 ) np.testing.assert_allclose( outputs.pixel_values[:, :3, -3:, -3:].numpy(), [EXPECTED_PIXEL_VALUES_2] * len(batch_image), atol=1e-9 ) # padding on the right: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH_RIGHT_PADDING = [ [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH_RIGHT_PADDING = [ [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH_RIGHT_PADDING[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH_RIGHT_PADDING[-1]) self.assertListEqual( outputs.image_embeds_position_mask.numpy().tolist(), [[0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1)] * len(batch_image), ) processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") # test with image in batch (left padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the left: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH = [ [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH =[ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] EXPECTED_IMG_POS_MASK_BATCH = [ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 0] + [1] * num_image_tokens + [0] + [0] * len(expected_input_ids[0][1:]), [0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH[0]) 
self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[0], EXPECTED_IMG_POS_MASK_BATCH[0]) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH[-1]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[-1], EXPECTED_IMG_POS_MASK_BATCH[-1])
transformers/tests/models/kosmos2/test_processor_kosmos2.py/0
{ "file_path": "transformers/tests/models/kosmos2/test_processor_kosmos2.py", "repo_id": "transformers", "token_count": 9619 }
403
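For reference, a minimal usage sketch (not part of the test file above) of the bbox-aware call pattern that `test_full_processor` exercises; it assumes network access to the public "microsoft/kosmos-2-patch14-224" checkpoint and the sample two_dogs.jpg image that the test itself downloads.

import requests
from PIL import Image
from transformers import Kosmos2Processor

processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224")
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One grounded phrase with a single bounding box given as (start_patch_index, end_patch_index),
# one of the bbox formats accepted in test_full_processor above.
text = "<grounding> <phrase> Two puppies </phrase> sit in a field of grass."
inputs = processor(images=image, text=text, bboxes=[(79, 1016)], return_tensors="pt")
# Expected keys: pixel_values, input_ids, attention_mask, image_embeds_position_mask
print(list(inputs.keys()))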
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): import torch from transformers import GPT2LMHeadModel @require_torch @require_sentencepiece @require_tokenizers class MegatronGPT2IntegrationTest(unittest.TestCase): @slow @unittest.skip("Model is not available.") def test_inference_no_head(self): directory = "nvidia/megatron-gpt2-345m/" if "MYDIR" in os.environ: directory = os.path.join(os.environ["MYDIR"], directory) model = GPT2LMHeadModel.from_pretrained(directory) model.to(torch_device) model.half() input_ids = torch.tensor( [[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]], device=torch_device, dtype=torch.long, ) with torch.no_grad(): output = model(input_ids).logits expected_shape = torch.Size((1, 9, 50257)) self.assertEqual(output.shape, expected_shape) expected_diag = torch.tensor( [ 4.9414, -0.2920, -1.2148, -4.0273, -0.5161, -5.2109, -1.2412, -1.8301, -1.7734, -4.7148, -0.2317, -1.0811, -2.1777, 0.4141, -3.7969, -4.0586, -2.5332, -3.3809, 4.3867, ], device=torch_device, dtype=torch.half, ) for i in range(19): r, c = 8 * i // 17, 2792 * i # along the diagonal computed, expected = output[0, r, c], expected_diag[i] msg = f"row={r} col={c} computed={computed} expected={expected}" self.assertAlmostEqual(computed, expected, delta=1e-4, msg=msg)
transformers/tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py/0
{ "file_path": "transformers/tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py", "repo_id": "transformers", "token_count": 1284 }
404
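As a hedged companion to the skipped integration test above, this sketch shows the checkpoint layout and half-precision forward pass it expects; it assumes you have already converted the NVIDIA Megatron-LM GPT-2 345M checkpoint to Hugging Face format (for example with the Megatron-GPT2 conversion script shipped with transformers) and placed it under $MYDIR/nvidia/megatron-gpt2-345m/.

import os

import torch

from transformers import GPT2LMHeadModel

# The test resolves the checkpoint directory relative to the optional MYDIR env var.
directory = os.path.join(os.environ.get("MYDIR", "."), "nvidia/megatron-gpt2-345m/")
model = GPT2LMHeadModel.from_pretrained(directory).half().eval()

input_ids = torch.tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]], dtype=torch.long)
with torch.no_grad():
    logits = model(input_ids).logits
print(logits.shape)  # the test expects torch.Size([1, 9, 50257])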
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class OpenAIGPTModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, # intermediate_size=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range pad_token_id=self.pad_token_id, ) 
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTDoubleHeadsModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_openai_gpt_for_sequence_classification( self, config, input_ids, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels model = OpenAIGPTForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly pipeline_model_mapping = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["input_ids"] = inputs_dict["labels"] inputs_dict["token_type_ids"] = inputs_dict["labels"] inputs_dict["mc_token_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, ) inputs_dict["mc_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = OpenAIGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_openai_gpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs) def test_openai_gpt_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_openai_gpt_double_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) def test_openai_gpt_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OpenAIGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_openai_gpt(self): model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt") model.to(torch_device) input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device) # the president is expected_output_ids = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
transformers/tests/models/openai/test_modeling_openai.py/0
{ "file_path": "transformers/tests/models/openai/test_modeling_openai.py", "repo_id": "transformers", "token_count": 5450 }
405
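A short usage sketch of the greedy-generation path that OPENAIGPTModelLanguageGenerationTest checks, assuming the public "openai-gpt" checkpoint is reachable; the tokenizer call is an addition here, since the test hard-codes the input ids for "the president is".

import torch

from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt").eval()

input_ids = tokenizer("the president is", return_tensors="pt").input_ids
# Greedy decoding, as in the test's `do_sample=False` call.
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))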
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Please note that Pop2PianoTokenizer is too far from our usual tokenizers and thus cannot use the TokenizerTesterMixin class. """ import os import pickle import shutil import tempfile import unittest from transformers.feature_extraction_utils import BatchFeature from transformers.testing_utils import ( is_pretty_midi_available, is_torch_available, require_pretty_midi, require_torch, ) from transformers.tokenization_utils import BatchEncoding if is_torch_available(): import torch requirements_available = is_torch_available() and is_pretty_midi_available() if requirements_available: import pretty_midi from transformers import Pop2PianoTokenizer @require_torch @require_pretty_midi class Pop2PianoTokenizerTest(unittest.TestCase): def setUp(self): super().setUp() self.tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") def get_input_notes(self): notes = [ [ pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77), pretty_midi.Note(start=0.673379, end=0.905578, pitch=73, velocity=77), pretty_midi.Note(start=0.905578, end=2.159456, pitch=73, velocity=77), pretty_midi.Note(start=1.114558, end=2.159456, pitch=78, velocity=77), pretty_midi.Note(start=1.323537, end=1.532517, pitch=80, velocity=77), ], [ pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77), ], ] return notes def test_call(self): notes = self.get_input_notes() output = self.tokenizer( notes, return_tensors="pt", padding="max_length", truncation=True, max_length=10, return_attention_mask=True, ) # check the output type self.assertTrue(isinstance(output, BatchEncoding)) # check the values expected_output_token_ids = torch.tensor( [[134, 133, 74, 135, 77, 132, 77, 133, 77, 82], [134, 133, 74, 136, 132, 74, 134, 134, 134, 134]] ) expected_output_attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]) self.assertTrue(torch.allclose(output["token_ids"], expected_output_token_ids, atol=1e-4)) self.assertTrue(torch.allclose(output["attention_mask"], expected_output_attention_mask, atol=1e-4)) def test_batch_decode(self): # test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep) # Please note that this test does not test the accuracy of the outputs, instead it is designed to make sure that # the tokenizer's batch_decode can deal with attention_mask in feature-extractor outputs. For the accuracy check # please see the `test_batch_decode_outputs` test. 
model_output = torch.concatenate( [ torch.randint(size=[120, 96], low=0, high=70, dtype=torch.long), torch.zeros(size=[1, 96], dtype=torch.long), torch.randint(size=[50, 96], low=0, high=40, dtype=torch.long), torch.zeros(size=[1, 96], dtype=torch.long), ], axis=0, ) input_features = BatchFeature( { "beatsteps": torch.ones([2, 955]), "extrapolated_beatstep": torch.ones([2, 1000]), "attention_mask": torch.concatenate( [ torch.ones([120, 96], dtype=torch.long), torch.zeros([1, 96], dtype=torch.long), torch.ones([50, 96], dtype=torch.long), torch.zeros([1, 96], dtype=torch.long), ], axis=0, ), "attention_mask_beatsteps": torch.ones([2, 955]), "attention_mask_extrapolated_beatstep": torch.ones([2, 1000]), } ) output = self.tokenizer.batch_decode(token_ids=model_output, feature_extractor_output=input_features)[ "pretty_midi_objects" ] # check length self.assertTrue(len(output) == 2) # check object type self.assertTrue(isinstance(output[0], pretty_midi.pretty_midi.PrettyMIDI)) self.assertTrue(isinstance(output[1], pretty_midi.pretty_midi.PrettyMIDI)) def test_batch_decode_outputs(self): # test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep) # Please note that this test tests the accuracy of the outputs of the tokenizer's `batch_decode` method. model_output = torch.tensor( [ [134, 133, 74, 135, 77, 82, 84, 136, 132, 74, 77, 82, 84], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], ] ) input_features = BatchEncoding( { "beatsteps": torch.tensor([[0.0697, 0.1103, 0.1509, 0.1916]]), "extrapolated_beatstep": torch.tensor([[0.0000, 0.0406, 0.0813, 0.1219]]), } ) output = self.tokenizer.batch_decode(token_ids=model_output, feature_extractor_output=input_features) # check outputs self.assertEqual(len(output["notes"]), 4) predicted_start_timings, predicted_end_timings = [], [] for i in output["notes"]: predicted_start_timings.append(i.start) predicted_end_timings.append(i.end) # Checking note start timings expected_start_timings = torch.tensor( [ 0.069700, 0.110300, 0.110300, 0.110300, ] ) predicted_start_timings = torch.tensor(predicted_start_timings) self.assertTrue(torch.allclose(expected_start_timings, predicted_start_timings, atol=1e-4)) # Checking note end timings expected_end_timings = torch.tensor( [ 0.191600, 0.191600, 0.191600, 0.191600, ] ) predicted_end_timings = torch.tensor(predicted_end_timings) self.assertTrue(torch.allclose(expected_end_timings, predicted_end_timings, atol=1e-4)) def test_get_vocab(self): vocab_dict = self.tokenizer.get_vocab() self.assertIsInstance(vocab_dict, dict) self.assertGreaterEqual(len(self.tokenizer), len(vocab_dict)) vocab = [self.tokenizer.convert_ids_to_tokens(i) for i in range(len(self.tokenizer))] self.assertEqual(len(vocab), len(self.tokenizer)) self.tokenizer.add_tokens(["asdfasdfasdfasdf"]) vocab = [self.tokenizer.convert_ids_to_tokens(i) for i in range(len(self.tokenizer))] self.assertEqual(len(vocab), len(self.tokenizer)) def test_save_and_load_tokenizer(self): tmpdirname = tempfile.mkdtemp() sample_notes = self.get_input_notes() self.tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = self.tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_token_ids = self.tokenizer(sample_notes)["token_ids"] before_vocab = self.tokenizer.get_vocab() self.tokenizer.save_pretrained(tmpdirname) after_tokenizer = 
self.tokenizer.__class__.from_pretrained(tmpdirname) after_token_ids = after_tokenizer(sample_notes)["token_ids"] after_vocab = after_tokenizer.get_vocab() self.assertDictEqual(before_vocab, after_vocab) self.assertListEqual(before_token_ids, after_token_ids) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) shutil.rmtree(tmpdirname) def test_pickle_tokenizer(self): tmpdirname = tempfile.mkdtemp() notes = self.get_input_notes() subwords = self.tokenizer(notes)["token_ids"] filename = os.path.join(tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(self.tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) subwords_loaded = tokenizer_new(notes)["token_ids"] self.assertListEqual(subwords, subwords_loaded) def test_padding_side_in_kwargs(self): tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", padding_side="left") self.assertEqual(tokenizer_p.padding_side, "left") tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", padding_side="right") self.assertEqual(tokenizer_p.padding_side, "right") self.assertRaises( ValueError, Pop2PianoTokenizer.from_pretrained, "sweetcocoa/pop2piano", padding_side="unauthorized", ) def test_truncation_side_in_kwargs(self): tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", truncation_side="left") self.assertEqual(tokenizer_p.truncation_side, "left") tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", truncation_side="right") self.assertEqual(tokenizer_p.truncation_side, "right") self.assertRaises( ValueError, Pop2PianoTokenizer.from_pretrained, "sweetcocoa/pop2piano", truncation_side="unauthorized", ) def test_right_and_left_padding(self): tokenizer = self.tokenizer notes = self.get_input_notes() notes = notes[0] max_length = 20 padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" padded_notes = tokenizer(notes, padding="max_length", max_length=max_length)["token_ids"] padded_notes_length = len(padded_notes) notes_without_padding = tokenizer(notes, padding="do_not_pad")["token_ids"] padding_size = max_length - len(notes_without_padding) self.assertEqual(padded_notes_length, max_length) self.assertEqual(notes_without_padding + [padding_idx] * padding_size, padded_notes) # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "left" padded_notes = tokenizer(notes, padding="max_length", max_length=max_length)["token_ids"] padded_notes_length = len(padded_notes) notes_without_padding = tokenizer(notes, padding="do_not_pad")["token_ids"] padding_size = max_length - len(notes_without_padding) self.assertEqual(padded_notes_length, max_length) self.assertEqual([padding_idx] * padding_size + notes_without_padding, padded_notes) # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' notes_without_padding = tokenizer(notes)["token_ids"] tokenizer.padding_side = "right" padded_notes_right = tokenizer(notes, padding=False)["token_ids"] self.assertEqual(len(padded_notes_right), len(notes_without_padding)) self.assertEqual(padded_notes_right, notes_without_padding) tokenizer.padding_side = "left" padded_notes_left = tokenizer(notes, padding="longest")["token_ids"] 
self.assertEqual(len(padded_notes_left), len(notes_without_padding)) self.assertEqual(padded_notes_left, notes_without_padding) tokenizer.padding_side = "right" padded_notes_right = tokenizer(notes, padding="longest")["token_ids"] self.assertEqual(len(padded_notes_right), len(notes_without_padding)) self.assertEqual(padded_notes_right, notes_without_padding) tokenizer.padding_side = "left" padded_notes_left = tokenizer(notes, padding=False)["token_ids"] self.assertEqual(len(padded_notes_left), len(notes_without_padding)) self.assertEqual(padded_notes_left, notes_without_padding) def test_right_and_left_truncation(self): tokenizer = self.tokenizer notes = self.get_input_notes() notes = notes[0] truncation_size = 3 # RIGHT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "right" full_encoded_notes = tokenizer(notes)["token_ids"] full_encoded_notes_length = len(full_encoded_notes) truncated_notes = tokenizer(notes, max_length=full_encoded_notes_length - truncation_size, truncation=True)[ "token_ids" ] self.assertEqual(full_encoded_notes_length, len(truncated_notes) + truncation_size) self.assertEqual(full_encoded_notes[:-truncation_size], truncated_notes) # LEFT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "left" full_encoded_notes = tokenizer(notes)["token_ids"] full_encoded_notes_length = len(full_encoded_notes) truncated_notes = tokenizer(notes, max_length=full_encoded_notes_length - truncation_size, truncation=True)[ "token_ids" ] self.assertEqual(full_encoded_notes_length, len(truncated_notes) + truncation_size) self.assertEqual(full_encoded_notes[truncation_size:], truncated_notes) # RIGHT & LEFT TRUNCATION - Check that nothing is done for 'longest' and 'no_truncation' tokenizer.truncation_side = "right" truncated_notes_right = tokenizer(notes, truncation=True)["token_ids"] self.assertEqual(full_encoded_notes_length, len(truncated_notes_right)) self.assertEqual(full_encoded_notes, truncated_notes_right) tokenizer.truncation_side = "left" truncated_notes_left = tokenizer(notes, truncation="longest_first")["token_ids"] self.assertEqual(len(truncated_notes_left), full_encoded_notes_length) self.assertEqual(truncated_notes_left, full_encoded_notes) tokenizer.truncation_side = "right" truncated_notes_right = tokenizer(notes, truncation="longest_first")["token_ids"] self.assertEqual(len(truncated_notes_right), full_encoded_notes_length) self.assertEqual(truncated_notes_right, full_encoded_notes) tokenizer.truncation_side = "left" truncated_notes_left = tokenizer(notes, truncation=True)["token_ids"] self.assertEqual(len(truncated_notes_left), full_encoded_notes_length) self.assertEqual(truncated_notes_left, full_encoded_notes) def test_padding_to_multiple_of(self): notes = self.get_input_notes() if self.tokenizer.pad_token is None: self.skipTest("No padding token.") else: normal_tokens = self.tokenizer(notes[0], padding=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = self.tokenizer(notes[0], pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = self.tokenizer(notes[0], padding=True, truncation=True, pad_to_multiple_of=8) for 
key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, self.tokenizer.__call__, notes[0], padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_padding_with_attention_mask(self): if self.tokenizer.pad_token is None: self.skipTest("No padding token.") if "attention_mask" not in self.tokenizer.model_input_names: self.skipTest("This model does not use attention mask.") features = [ {"token_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]}, {"token_ids": [1, 2, 3], "attention_mask": [1, 1, 0]}, ] padded_features = self.tokenizer.pad(features) if self.tokenizer.padding_side == "right": self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]]) else: self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]])
transformers/tests/models/pop2piano/test_tokenization_pop2piano.py/0
{ "file_path": "transformers/tests/models/pop2piano/test_tokenization_pop2piano.py", "repo_id": "transformers", "token_count": 7905 }
406
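The call pattern exercised by `test_call` above, shown as a standalone hedged sketch; it assumes the optional pretty_midi dependency and the public "sweetcocoa/pop2piano" checkpoint are available.

import pretty_midi

from transformers import Pop2PianoTokenizer

tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano")

# A batch containing a single "composition": a list of pretty_midi notes, as in get_input_notes above.
notes = [[pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77)]]
encoding = tokenizer(
    notes,
    return_tensors="pt",
    padding="max_length",
    truncation=True,
    max_length=10,
    return_attention_mask=True,
)
print(encoding["token_ids"].shape, encoding["attention_mask"].shape)  # both (1, 10)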
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class RagTokenizerTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) @require_tokenizers def test_save_load_pretrained_with_saved_config(self): save_dir = os.path.join(self.tmpdirname, "rag_tokenizer") rag_config = 
RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict()) rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer()) rag_config.save_pretrained(save_dir) rag_tokenizer.save_pretrained(save_dir) new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config) self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab()) self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast) self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab()) @slow def test_pretrained_token_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict) @slow def test_pretrained_sequence_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict)
transformers/tests/models/rag/test_tokenization_rag.py/0
{ "file_path": "transformers/tests/models/rag/test_tokenization_rag.py", "repo_id": "transformers", "token_count": 3143 }
407
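A hedged sketch of the slow-path usage covered by `test_pretrained_token_nq_tokenizer`, assuming the public "facebook/rag-token-nq" checkpoint and its DPR/BART sub-tokenizers can be downloaded.

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")

# RagTokenizer wraps a DPR question-encoder tokenizer and a BART generator tokenizer;
# plain calls are forwarded to the question-encoder side by default, as in the test above.
input_dict = tokenizer(["who got the first nobel prize in physics"])
print(list(input_dict.keys()))  # e.g. input_ids, attention_mask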
# coding=utf-8 # Copyright 2021 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..speech_to_text.test_modeling_speech_to_text import Speech2TextModelTester from ..speech_to_text_2.test_modeling_speech_to_text_2 import Speech2Text2StandaloneDecoderModelTester from ..wav2vec2.test_modeling_wav2vec2 import Wav2Vec2ModelTester if is_torch_available(): import numpy as np import torch from transformers import ( BertLMHeadModel, Speech2Text2ForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Model, ) from transformers.modeling_outputs import BaseModelOutput from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextEncoder @require_torch class EncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_encoder_decoder_model_from_pretrained_configs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = SpeechEncoderDecoderModel(encoder_decoder_config) enc_dec_model.to(torch_device) enc_dec_model.eval() self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = 
BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_with_inputs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): inputs = input_values if input_features is None else input_features encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) outputs_encoder_decoder_kwarg = enc_dec_model( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder_kwarg["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = SpeechEncoderDecoderModel.from_pretrained(tmpdirname) enc_dec_model.to(torch_device) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - 
out_2)) self.assertLessEqual(max_diff, 1e-5) def check_save_and_load_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname: enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname) enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname) SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=encoder_tmp_dirname, decoder_pretrained_model_name_or_path=decoder_tmp_dirname, ) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_output_attentions( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, input_values=None, input_features=None, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) inputs = input_values if input_features is None else input_features encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model.encoder._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate( self, config, decoder_config, 
input_values=None, input_features=None, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) # make sure EOS token is set to None to prevent early stopping of generation if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None inputs = input_values if input_features is None else input_features # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( inputs, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual(generated_output.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_with_inputs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_with_inputs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_save_and_load_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def test_training_gradient_checkpointing(self): inputs_dict = self.prepare_config_and_inputs() encoder_model, decoder_model = self.get_encoder_decoder_model( inputs_dict["config"], inputs_dict["decoder_config"] ) model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 model.config.pad_token_id = 0 model_inputs = { "attention_mask": inputs_dict["attention_mask"], "labels": inputs_dict["labels"], "decoder_input_ids": inputs_dict["decoder_input_ids"], } inputs = inputs_dict["input_features"] if "input_features" in inputs_dict else inputs_dict["input_values"] loss = model(inputs, **model_inputs).loss loss.backward() @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 
= SpeechEncoderDecoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class Wav2Vec2BertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-base-960h", "bert-base-cased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_values": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) wav2vec2_model_tester = Wav2Vec2ModelTester(self) encoder_config_and_inputs = wav2vec2_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_values, input_mask, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @require_torch class Speech2TextBertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/s2t-small-librispeech-asr", "bert-base-cased" ) batch_size = 13 input_features = floats_tensor([batch_size, 7, 80], scale=1.0) attention_mask = random_attention_mask([batch_size, 7]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_features": input_features, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Speech2TextEncoder(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) speech2text_model_tester = Speech2TextModelTester(self) encoder_config_and_inputs = speech2text_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() config, inputs = encoder_config_and_inputs input_features 
= inputs["input_features"] input_mask = inputs["attention_mask"] ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "input_features": input_features, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } # can't save full model for now because Speech2TextModel != Speech2TextEncoder def test_encoder_decoder_model_from_pretrained_configs(self): pass # can't save full model for now because Speech2TextModel != Speech2TextEncoder def test_save_and_load_from_pretrained(self): pass # all published pretrained models are Speech2TextModel != Speech2TextEncoder def test_real_model_save_load_from_pretrained(self): pass @require_torch class Wav2Vec2Speech2Text2(EncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = Speech2Text2ForCausalLM(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = Wav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = Speech2Text2StandaloneDecoderModelTester( self, batch_size=13, d_model=32, max_position_embeddings=512 ) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs() ( config, input_values, input_mask, ) = encoder_config_and_inputs (decoder_config, decoder_input_ids, decoder_attention_mask, _) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "labels": decoder_input_ids, } # there are no published pretrained Speech2Text2ForCausalLM for now def test_real_model_save_load_from_pretrained(self): pass
transformers/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py/0
{ "file_path": "transformers/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py", "repo_id": "transformers", "token_count": 11717 }
408
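The tests above exercise SpeechEncoderDecoderModel by pairing a pretrained speech encoder with an autoregressive text decoder and decoding from the pad token. The sketch below is a rough illustration, not part of the test suite: it reuses the checkpoint pair from Wav2Vec2BertModelTest ("facebook/wav2vec2-base-960h" + "bert-base-cased") and feeds random audio in place of a real waveform, so the generated ids are only meaningful after fine-tuning the freshly initialized cross-attention.

import torch

from transformers import SpeechEncoderDecoderModel

# Compose a speech encoder and a text decoder, as in Wav2Vec2BertModelTest above.
model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-cased"
)
model.eval()

# Dummy waveform batch (batch_size=1, 512 samples at 16 kHz); real inputs would
# come from Wav2Vec2FeatureExtractor / Wav2Vec2Processor.
input_values = torch.randn(1, 512)

# BERT has no BOS token, so decoding starts from the decoder's pad token id,
# mirroring check_encoder_decoder_model_generate above.
generated_ids = model.generate(
    input_values, decoder_start_token_id=model.config.decoder.pad_token_id
)
print(generated_ids.shape)  # (1, generated_length)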
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Splinter model. """ import copy import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel from transformers.models.splinter.modeling_splinter import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST class SplinterModelTester: def __init__( self, parent, batch_size=13, num_questions=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, question_token_id=1, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_questions = num_questions self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.question_token_id = question_token_id self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, 1] = self.question_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) start_positions = None end_positions = None question_positions = None if self.use_labels: start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels) config = SplinterConfig( vocab_size=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, question_token_id=self.question_token_id, ) return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions[:, 0], end_positions=end_positions[:, 0], ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SplinterModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SplinterModel, SplinterForQuestionAnswering, SplinterForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": SplinterModel, "question-answering": SplinterForQuestionAnswering} if is_torch_available() else {} ) # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests": return True elif pipeline_test_casse_name == "FeatureExtractionPipelineTests" and tokenizer_name.endswith("Fast"): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if 
issubclass(model_class, SplinterForPreTraining): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["question_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) elif issubclass(model_class, SplinterForQuestionAnswering): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): if isinstance(model, SplinterForPreTraining): with self.assertRaises(TypeError): # question_positions must not be None. model(**inputs)[0] else: model(**inputs)[0] @slow def test_model_from_pretrained(self): for model_name in SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SplinterModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs. # When the batch is distributed to multiple devices, each replica could get different values for the maximal number # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output. 
@require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): from torch import nn config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: # Skip this case since it will fail sometimes, as described above. if model_class == SplinterForPreTraining: continue model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class SplinterModelIntegrationTest(unittest.TestCase): @slow def test_splinter_question_answering(self): model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]" # Output should be the span "the United Kingdom" input_ids = torch.tensor( [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) output = model(input_ids) expected_shape = torch.Size((1, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits), 10) self.assertEqual(torch.argmax(output.end_logits), 12) @slow def test_splinter_pretraining(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) question_positions = torch.tensor([[1, 5]], dtype=torch.long) output = model(input_ids, question_positions=question_positions) expected_shape = torch.Size((1, 2, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10) self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12) @slow def test_splinter_pretraining_loss_requires_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) with self.assertRaises(TypeError): model( input_ids, start_positions=start_positions, end_positions=end_positions, ) @slow def test_splinter_pretraining_loss(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . 
[SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long) end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long) question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.assertAlmostEqual(output.loss.item(), 0.0024, 4) @slow def test_splinter_pretraining_loss_with_padding(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) question_positions = torch.tensor([[1, 5]], dtype=torch.long) start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long) end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long) question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) output_with_padding = model( input_ids, start_positions=start_positions_with_padding, end_positions=end_positions_with_padding, question_positions=question_positions_with_padding, ) self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4) # Note that the original code uses 0 to denote padded question tokens # and their start and end positions. As the pad_token_id of the model's # config is used for the losse's ignore_index in SplinterForPreTraining, # we add this test to ensure anybody making changes to the default # value of the config, will be aware of the implication. self.assertEqual(model.config.pad_token_id, 0) @slow def test_splinter_pretraining_prepare_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1, 2, 104, 3, 4, 102], [101, 1, 104, 2, 104, 3, 104, 102], [101, 1, 2, 104, 104, 3, 4, 102], [101, 1, 2, 3, 4, 5, 104, 102], ] ) question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long) output_without_positions = model(input_ids) output_with_positions = model(input_ids, question_positions=question_positions) self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all()) self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
transformers/tests/models/splinter/test_modeling_splinter.py/0
{ "file_path": "transformers/tests/models/splinter/test_modeling_splinter.py", "repo_id": "transformers", "token_count": 9643 }
409
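The Splinter integration tests above feed hand-built token ids in which id 104 marks the [QUESTION] position and then read the argmax of the start/end logits. The sketch below is an approximate tokenizer-based equivalent, not taken from the tests: it assumes the "tau/splinter-base-qass" tokenizer keeps [QUESTION] as a single special token (its default question_token), in which case the decoded span should match the "the United Kingdom" answer checked in test_splinter_question_answering.

import torch

from transformers import SplinterForQuestionAnswering, SplinterTokenizer

tokenizer = SplinterTokenizer.from_pretrained("tau/splinter-base-qass")
model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass")
model.eval()

# The [QUESTION] token marks where the question word would sit; the model
# predicts the answer span over the rest of the sequence. Assumes the tokenizer
# maps "[QUESTION]" to its special question token.
text = "Brad was born in [QUESTION] . He returned to the United Kingdom later ."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

start = torch.argmax(outputs.start_logits, dim=-1).item()
end = torch.argmax(outputs.end_logits, dim=-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))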
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class TimmBackboneModelTester: def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet18", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] self.stage_names = stage_names self.out_features = out_features self.backbone = backbone self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmBackboneConfig( image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) def create_and_check_model(self, config, pixel_values): model = TimmBackbone(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False def setUp(self): self.config_class = PretrainedConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester(self, config_class=self.config_class, has_text_modality=False) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices, (-1,)) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip("TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip("TimmBackbone initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds") def test_model_common_attributes(self): pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip("model weights aren't tied in TimmBackbone.") def test_tie_model_weights(self): pass @unittest.skip("model weights aren't tied in TimmBackbone.") def test_tied_model_weights_key_ignore(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_load_save_without_tied_weights(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip("TimmBackbone doesn't support output_attentions.") def test_torchscript_output_attentions(self): pass @unittest.skip("Safetensors is not supported by timm.") def test_can_use_safetensors(self): pass @unittest.skip("Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an 
OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) # TimmBackbone config doesn't have out_features attribute def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py/0
{ "file_path": "transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py", "repo_id": "transformers", "token_count": 4294 }
410
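test_timm_transformer_backbone_equivalence above compares a backbone loaded through timm with its native transformers counterpart. The sketch below replays that comparison outside the test harness as an illustration only: it uses the same "resnet18" / "microsoft/resnet-18" checkpoints, assumes timm is installed, and feeds a dummy image tensor; both backbones return one feature map per requested stage and expose a channels attribute.

import torch

from transformers import AutoBackbone

# Same checkpoints as the equivalence test: "resnet18" resolved through timm,
# "microsoft/resnet-18" resolved as a native transformers backbone.
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])

pixel_values = torch.randn(1, 3, 224, 224)  # dummy image batch

with torch.no_grad():
    timm_features = timm_backbone(pixel_values).feature_maps
    hf_features = hf_backbone(pixel_values).feature_maps

# One feature map per requested stage, with matching channel counts.
print([tuple(f.shape) for f in timm_features])
print(timm_backbone.channels, hf_backbone.channels)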
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch UniSpeechSat model. """ import math import unittest import numpy as np import pytest from datasets import load_dataset from transformers import UniSpeechSatConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( UniSpeechSatForAudioFrameClassification, UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, UniSpeechSatModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) class UniSpeechSatModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, tdnn_dim=(32, 32), tdnn_kernel=(3, 3), tdnn_dilation=(1, 1), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) 
self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return UniSpeechSatConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = UniSpeechSatModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = UniSpeechSatModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = UniSpeechSatForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) 
self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = UniSpeechSatForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechSatForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechSatForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, *args): config.ctc_zero_infinity = True model = UniSpeechSatForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() # use a longer sequence length to account for TDNN temporal downsampling input_values = floats_tensor([self.batch_size, self.seq_length * 2], scale=1.0) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = UniSpeechSatForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = 
model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification, UniSpeechSatForAudioFrameClassification, UniSpeechSatForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": UniSpeechSatForSequenceClassification, "automatic-speech-recognition": UniSpeechSatForCTC, "feature-extraction": UniSpeechSatModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = UniSpeechSatModelTester(self) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # UniSpeechSat has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # UniSpeechSat cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # UniSpeechSat has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = 
ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, 
return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") self.assertIsNotNone(model) @require_torch class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( (UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = UniSpeechSatModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # UniSpeechSat has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # UniSpeechSat cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # UniSpeechSat has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = 
outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def 
test_mask_time_feature_prob_ctc_single_batch(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") self.assertIsNotNone(model) @require_torch @require_soundfile @slow class UniSpeechSatModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_encoder_base(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) # fmt: off expected_hidden_states_slice = torch.tensor( [[[-0.0743, 0.1384], [-0.0845, 0.1704]], [[-0.0954, 0.1936], [-0.1123, 0.2095]]], device=torch_device, ) # fmt: on self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3)) def test_inference_encoder_large(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) # fmt: off expected_hidden_states_slice = torch.tensor( [[[-0.1172, -0.0797], [-0.0012, 0.0213]], [[-0.1225, -0.1277], [-0.0668, -0.0585]]], device=torch_device, ) # fmt: on self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3)) def test_inference_diarization(self): model = UniSpeechSatForAudioFrameClassification.from_pretrained("microsoft/unispeech-sat-base-plus-sd").to( torch_device ) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sd") input_data = self._load_superb("sd", 4) inputs = 
processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = torch.tensor( [ [[-5.6119, -5.5845], [-3.7772, -5.4824], [-3.6914, -5.1619], [-4.7560, -5.0496]], [[-6.3785, -4.8365], [-5.5863, -5.4149], [-5.5639, -4.8469], [-6.1511, -4.0052]], [[-6.0355, -3.7414], [-5.5968, -4.8061], [-5.4620, -4.7310], [-5.5864, -4.6078]], [[-5.9493, -4.8963], [-4.4050, -5.4476], [-4.1755, -5.1395], [-4.0272, -4.3705]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 270) self.assertEqual(labels[0, :, 1].sum(), 647) self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1) cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9671, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.4941, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.5616, 3) self.assertAlmostEqual(outputs.loss.item(), 18.5925, 2)
# Source file: transformers/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
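# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original test file above): a minimal
# sketch of the speaker-verification flow that `test_inference_speaker_verification`
# exercises, using the same "microsoft/unispeech-sat-base-plus-sv" checkpoint.
# It assumes `wav_a` and `wav_b` are 16 kHz mono waveforms given as 1-D NumPy
# arrays; the 0.9 decision threshold below is an arbitrary placeholder, not a
# tuned value.
import numpy as np
import torch

from transformers import UniSpeechSatForXVector, Wav2Vec2FeatureExtractor


def speaker_similarity(wav_a: np.ndarray, wav_b: np.ndarray) -> float:
    checkpoint = "microsoft/unispeech-sat-base-plus-sv"
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(checkpoint)
    model = UniSpeechSatForXVector.from_pretrained(checkpoint).eval()

    inputs = feature_extractor([wav_a, wav_b], sampling_rate=16_000, padding=True, return_tensors="pt")
    with torch.no_grad():
        embeddings = model(**inputs).embeddings

    # L2-normalize the x-vectors and compare them with cosine similarity,
    # which is what the integration test above asserts on.
    embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
    return torch.nn.CosineSimilarity(dim=-1)(embeddings[0], embeddings[1]).item()


# Hypothetical usage: treat the pair as the same speaker above a chosen threshold.
# is_same_speaker = speaker_similarity(wav_a, wav_b) > 0.9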
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow VisionEncoderDecoder model. """ from __future__ import annotations import copy import os import tempfile import unittest import numpy as np from transformers import is_tf_available, is_torch_available, is_vision_available from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_vision, slow, torch_device, ) from transformers.utils.generic import ModelOutput from ...test_modeling_tf_common import floats_tensor, ids_tensor from ..gpt2.test_modeling_tf_gpt2 import TFGPT2ModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): import tensorflow as tf from transformers import ( AutoConfig, AutoImageProcessor, AutoTokenizer, TFAutoModel, TFAutoModelForCausalLM, TFGPT2LMHeadModel, TFVisionEncoderDecoderModel, TFViTModel, VisionEncoderDecoderConfig, ) from transformers.modeling_tf_outputs import TFBaseModelOutput if is_torch_available(): import torch from transformers import GPT2LMHeadModel, VisionEncoderDecoderModel, ViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor @require_tf class TFVisionEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = TFVisionEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, 
decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_hidden_states) outputs_encoder_decoder = enc_dec_model( pixel_values=None, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_from_pretrained( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, return_dict=True, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_save_and_load( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_labels( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, labels, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, kwargs=kwargs, ) # Make sure `loss` exist 
self.assertIn("loss", outputs_encoder_decoder) batch_size, seq_len = decoder_input_ids.shape expected_shape = (batch_size, seq_len, decoder_config.vocab_size) self.assertEqual(outputs_encoder_decoder["logits"].shape, expected_shape) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_output_attentions( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, kwargs=kwargs, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) self.assertEqual(encoder_attentions[0].shape[-3:-2], (config.num_attention_heads,)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] * ( 1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0) ) self.assertEqual( cross_attentions[0].shape[-3:-1], (decoder_config.num_attention_heads, cross_attention_input_seq_len), ) def check_encoder_decoder_model_generate(self, pixel_values, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) # Generate until max length if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( pixel_values, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual( tuple(generated_output.shape.as_list()), (pixel_values.shape[0],) + (decoder_config.max_length,) ) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. 
For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `names` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `names` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): """Wrap `check_pt_tf_models` to further check PT -> TF again""" self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # PT -> TF with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # All models tested in this file have attentions encoder_decoder_config.output_attentions = True pt_model = VisionEncoderDecoderModel(encoder_decoder_config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # TODO: A generalizable way to determine this attribute encoder_decoder_config.output_attentions = True tf_model = TFVisionEncoderDecoderModel(encoder_decoder_config) # Make sure model is built before saving tf_model(**tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: tf_model.save_pretrained(tmpdirname, safe_serialization=False) pt_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname, from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def 
test_encoder_decoder_model(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=True) def test_save_and_load_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_save_and_load(**config_inputs_dict) def test_encoder_decoder_model_labels(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_labels(**config_inputs_dict) def test_encoder_decoder_model_output_attentions(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**config_inputs_dict) def test_encoder_decoder_model_generate(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**config_inputs_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).") @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() labels = config_inputs_dict.pop("decoder_token_labels") # Keep only common arguments arg_names = [ "config", "pixel_values", "decoder_config", "decoder_input_ids", "decoder_attention_mask", "encoder_hidden_states", ] config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names} config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") # Output all for aggressive testing config.output_hidden_states = True decoder_config.output_hidden_states = True # All models tested in this file have attentions config.output_attentions = True decoder_config.output_attentions = True tf_inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del tf_inputs_dict["encoder_hidden_states"] # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. for k in ["decoder_attention_mask"]: attention_mask = tf_inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. 
attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) tf_inputs_dict[k] = attention_mask tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict) tf_inputs_dict_with_labels["labels"] = labels self.assertTrue(decoder_config.cross_attention_hidden_size is None) # Original test: check without `labels` and without `enc_to_dec_proj` projection self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) # check with `labels` self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() pixel_values = floats_tensor( [ 13, model_2.config.encoder.num_channels, model_2.config.encoder.image_size, model_2.config.encoder.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) outputs = model_2( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFViT2GPT2EncoderDecoderModelTest(TFVisionEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model(self): return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFViTModel(config, name="encoder") decoder_model = TFGPT2LMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = TFViTModelTester(self, batch_size=13) model_tester_decoder = TFGPT2ModelTester(self) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, pixel_values, labels) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, decoder_head_mask, decoder_token_type_ids, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "decoder_token_labels": decoder_token_labels, "encoder_hidden_states": encoder_hidden_states, # This is not used in the tests. 
"labels": decoder_token_labels, } @require_tf class TFVisionEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_decoder_config(self): config = AutoConfig.from_pretrained("gpt2") config.is_decoder = True config.add_cross_attention = True return config def get_encoderdecoder_model(self): return TFVisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en") def get_encoder_decoder_models(self): encoder_model = TFViTModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") decoder_model = TFGPT2LMHeadModel.from_pretrained("gpt2", config=self.get_decoder_config(), name="decoder") return {"encoder": encoder_model, "decoder": decoder_model} def _check_configuration_tie(self, model): assert id(model.decoder.config) == id(model.config.decoder) assert id(model.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) model = TFVisionEncoderDecoderModel(**self.get_encoder_decoder_models()) self._check_configuration_tie(model) model = self.get_encoderdecoder_model() self._check_configuration_tie(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFVisionEncoderDecoderModelSaveLoadTests(unittest.TestCase): def get_encoder_decoder_config(self): encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k") decoder_config = AutoConfig.from_pretrained("gpt2", is_decoder=True, add_cross_attention=True) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def get_encoder_decoder_config_small(self): encoder_config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-vit") decoder_config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-gpt2", is_decoder=True, add_cross_attention=True ) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def test_encoder_decoder_save_load_from_encoder_decoder(self): config = self.get_encoder_decoder_config_small() # create two random ViT/GPT2 models for vit-gpt2 & initialize weights (+cross_attention weights) encoder = TFViTModel(config.encoder) encoder.build_in_name_scope() decoder = TFGPT2LMHeadModel(config.decoder) decoder.build_in_name_scope() encoder_decoder_orig = TFVisionEncoderDecoderModel(encoder=encoder, decoder=decoder) pixel_values = floats_tensor( [ 13, encoder.config.num_channels, encoder.config.image_size, encoder.config.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], decoder.config.vocab_size) logits_orig = encoder_decoder_orig(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits with tempfile.TemporaryDirectory() as tmp_dirname: encoder_path = os.path.join(tmp_dirname, "encoder") decoder_path = os.path.join(tmp_dirname, "decoder") encoder.save_pretrained(encoder_path) decoder.save_pretrained(decoder_path) encoder_decoder = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_path, decoder_path) logits_1 = encoder_decoder(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits self.assertTrue(logits_orig.numpy().sum() - logits_1.numpy().sum() < 1e-3) max_diff = np.max(np.abs(logits_1.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 
0.0, places=4) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder.save_pretrained(tmp_dirname) encoder_decoder = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_2 = encoder_decoder(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) @require_torch @is_pt_tf_cross_test def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): config = self.get_encoder_decoder_config_small() # create two random ViT/GPT2 models for vit-gpt2 & initialize weights (+cross_attention weights) encoder_pt = ViTModel(config.encoder).to(torch_device).eval() decoder_pt = GPT2LMHeadModel(config.decoder).to(torch_device).eval() encoder_decoder_pt = VisionEncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval() pixel_values = floats_tensor( [ 13, encoder_pt.config.num_channels, encoder_pt.config.image_size, encoder_pt.config.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size) pt_pixel_values = torch.tensor(pixel_values.numpy(), device=torch_device, dtype=torch.float) pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long) logits_pt = encoder_decoder_pt(pixel_values=pt_pixel_values, decoder_input_ids=pt_decoder_input_ids).logits # PyTorch => TensorFlow with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2: encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( tmp_dirname_1, tmp_dirname_2 ) logits_tf = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) # Make sure `from_pretrained` following `save_pretrained` work and give the same result # (See https://github.com/huggingface/transformers/pull/14016) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname, safe_serialization=False) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_tf_2 = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) @require_vision @slow def test_encoder_decoder_from_pretrained(self): load_weight_prefix = TFVisionEncoderDecoderModel.load_weight_prefix config = self.get_encoder_decoder_config() image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") decoder_tokenizer = AutoTokenizer.from_pretrained("gpt2") img = prepare_img() pixel_values = image_processor(images=img, return_tensors="tf").pixel_values decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: # Since most of HF's models don't have pretrained cross-attention layers, they are randomly # initialized even if we create models using `from_pretrained` method. # For the tests, the decoder need to be a model with pretrained cross-attention layers. # So we create pretrained models (without `load_weight_prefix`), save them, and later, # we load them using `from_pretrained`. 
# (we don't need to do this for encoder, but let's make the code more similar between encoder/decoder) encoder = TFAutoModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") # It's necessary to specify `add_cross_attention=True` here. decoder = TFAutoModelForCausalLM.from_pretrained( "gpt2", is_decoder=True, add_cross_attention=True, name="decoder" ) pretrained_encoder_dir = os.path.join(tmp_dirname, "pretrained_encoder") pretrained_decoder_dir = os.path.join(tmp_dirname, "pretrained_decoder") encoder.save_pretrained(pretrained_encoder_dir) decoder.save_pretrained(pretrained_decoder_dir) del encoder del decoder enc_dec_model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( pretrained_encoder_dir, pretrained_decoder_dir, ) enc_dec_model.build_in_name_scope() # check that the from pretrained methods work enc_dec_model.save_pretrained(tmp_dirname) enc_dec_model = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) output = enc_dec_model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del enc_dec_model # Create the model using `__init__` with loaded ``pretrained`` encoder / decoder encoder = TFAutoModel.from_pretrained( pretrained_encoder_dir, load_weight_prefix=load_weight_prefix, name="encoder" ) decoder = TFAutoModelForCausalLM.from_pretrained( pretrained_decoder_dir, load_weight_prefix=load_weight_prefix, name="decoder" ) enc_dec_model = TFVisionEncoderDecoderModel(config=config, encoder=encoder, decoder=decoder) output = enc_dec_model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_init = output.loss max_diff = np.max(np.abs(loss_pretrained - loss_init)) expected_diff = 0.0 self.assertAlmostEqual(max_diff, expected_diff, places=4) @require_vision @require_tf class TFViT2GPT2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_coco_en(self): loc = "ydshieh/vit-gpt2-coco-en" image_processor = ViTImageProcessor.from_pretrained(loc) tokenizer = AutoTokenizer.from_pretrained(loc) model = TFVisionEncoderDecoderModel.from_pretrained(loc) # We will verify our results on an image of cute cats img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") pixel_values = image_processor(images=img, return_tensors="tf").pixel_values decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]]) logits = model(pixel_values, decoder_input_ids)[0].numpy() # verify the logits expected_shape = (1, 1, model.config.decoder.vocab_size) self.assertEqual(logits.shape, expected_shape) EXPECTED_LOGIT_SLICE = np.array( [ -38.705807, -30.639929, -31.41903, -39.012012, -38.38696, -34.887207, -33.290855, -35.68447, -38.508484, -36.124645, ] ) max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE)) self.assertLessEqual(max_diff, 1e-4) def generate_step(pixel_values): outputs = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True) output_ids = outputs.sequences preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds preds = generate_step(pixel_values) # should produce # ["a cat laying on top of a couch next to another cat"] self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"])
# Source file: transformers/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py
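# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original test file above): a minimal
# sketch of the image-captioning flow covered by `test_inference_coco_en`,
# using the same "ydshieh/vit-gpt2-coco-en" checkpoint. The generation settings
# (max_length=16, num_beams=4) simply mirror the test; `image_path` is a
# placeholder for any local image file.
from PIL import Image

from transformers import AutoTokenizer, TFVisionEncoderDecoderModel, ViTImageProcessor


def caption_image(image_path: str) -> str:
    checkpoint = "ydshieh/vit-gpt2-coco-en"
    image_processor = ViTImageProcessor.from_pretrained(checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = TFVisionEncoderDecoderModel.from_pretrained(checkpoint)

    pixel_values = image_processor(images=Image.open(image_path), return_tensors="tf").pixel_values
    # Beam search over the GPT-2 decoder, conditioned on the ViT encoder states.
    outputs = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
    return tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0].strip()


# Hypothetical usage, pointing at the COCO fixture used by the tests above:
# print(caption_image("./tests/fixtures/tests_samples/COCO/000000039769.png"))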
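# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original sources): a minimal sketch of
# the `pipeline()` call pattern that the pipelines test file below exercises.
# The tiny checkpoint name comes from those tests; a real application would pick
# a full-size model, and the scores produced by the tiny model are meaningless.
from transformers import pipeline

# Build a text-classification pipeline explicitly pinned to the PyTorch backend,
# then run it on a single string and on a small batch. Each call returns
# label/score predictions.
text_classifier = pipeline(
    task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
print(text_classifier("This is a test"))
print(text_classifier(["This restaurant is great", "This restaurant is awful"], batch_size=2))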
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import os import sys import tempfile import unittest from pathlib import Path import datasets import numpy as np from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, TextClassificationPipeline, TFAutoModelForSequenceClassification, pipeline, ) from transformers.pipelines import PIPELINE_REGISTRY, get_task from transformers.pipelines.base import Pipeline, _pad from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, RequestCounter, backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_pipeline import PairClassificationPipeline # noqa E402 logger = logging.getLogger(__name__) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. 
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) class ANY: def __init__(self, *_types): self._types = _types def __eq__(self, other): return isinstance(other, self._types) def __repr__(self): return f"ANY({', '.join(_type.__name__ for _type in self._types)})" @is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): from torch.utils.data import Dataset class MyDataset(Dataset): data = [ "This is a test", "This restaurant is great", "This restaurant is awful", ] def __len__(self): return 3 def __getitem__(self, i): return self.data[i] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) dataset = MyDataset() for output in text_classifier(dataset): self.assertEqual(output, {"label": ANY(str), "score": ANY(float)}) @require_torch def test_check_task_auto_inference(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertIsInstance(pipe, TextClassificationPipeline) @require_torch def test_pipeline_batch_size_global(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertEqual(pipe._batch_size, None) self.assertEqual(pipe._num_workers, None) pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1) self.assertEqual(pipe._batch_size, 2) self.assertEqual(pipe._num_workers, 1) @require_torch def test_pipeline_pathlike(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") with tempfile.TemporaryDirectory() as d: pipe.save_pretrained(d) path = Path(d) newpipe = pipeline(task="text-classification", model=path) self.assertIsInstance(newpipe, TextClassificationPipeline) @require_torch def test_pipeline_override(self): class MyPipeline(TextClassificationPipeline): pass text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline) self.assertIsInstance(text_classifier, MyPipeline) def test_check_task(self): task = get_task("gpt2") self.assertEqual(task, "text-generation") with self.assertRaises(RuntimeError): # Wrong framework get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best") @require_torch def test_iterator_data(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) # When using multiple workers on streamable data it should still work # This will force using `num_workers=1` with a warning for now. 
results = [] for out in pipe(data(10), num_workers=2): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_tf def test_iterator_data_tf(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf") out = pipe("This is a test") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_torch def test_unbatch_attentions_hidden_states(self): model = DistilBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert") text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) # Used to throw an error because `hidden_states` are a tuple of tensors # instead of the expected tensor. outputs = text_classifier(["This is great !"] * 20, batch_size=32) self.assertEqual(len(outputs), 20) @is_pipeline_test class PipelineScikitCompatTest(unittest.TestCase): @require_torch def test_pipeline_predict_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_predict_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_torch def test_pipeline_transform_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_transform_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @is_pipeline_test class PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): import torch items = [ { "label": "label1", "input_ids": torch.LongTensor([[1, 23, 24, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 0]]), }, { "label": "label2", "input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "right"), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "left"), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 
1, 1, 1, 1, 0]]) ) ) @require_torch def test_pipeline_image_padding(self): import torch items = [ { "label": "label1", "pixel_values": torch.zeros((1, 3, 10, 10)), }, { "label": "label2", "pixel_values": torch.zeros((1, 3, 10, 10)), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "pixel_values", 10, "right"), torch.zeros((2, 3, 10, 10)), ) ) @require_torch def test_pipeline_offset_mapping(self): import torch items = [ { "offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long), }, { "offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long), }, ] self.assertTrue( torch.allclose( _pad(items, "offset_mappings", 0, "right"), torch.zeros((2, 11, 2), dtype=torch.long), ), ) @is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): from transformers.pipelines.pt_utils import PipelineDataset dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineDataset(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = [dataset[i] for i in range(4)] self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator_no_len(self): from transformers.pipelines.pt_utils import PipelineIterator def dummy_dataset(): for i in range(4): yield i def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2}) with self.assertRaises(TypeError): len(dataset) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_batch_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]]} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]) @require_torch def test_pipeline_batch_unbatch_iterator_tensors(self): import torch from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}] def add(number, extra=0): return {"id": number["id"] + extra} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual( nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}] ) @require_torch def test_pipeline_chunk_iterator(self): from transformers.pipelines.pt_utils import PipelineChunkIterator def preprocess_chunk(n: int): for i in range(n): yield i dataset = [2, 3] dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [0, 1, 0, 1, 2]) @require_torch def test_pipeline_pack_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator def pack(item): return {"id": item["id"] + 1, "is_last": item["is_last"]} dataset = [ {"id": 0, "is_last": False}, {"id": 1, "is_last": True}, {"id": 0, "is_last": False}, {"id": 1, "is_last": False}, {"id": 2, 
"is_last": True}, ] dataset = PipelinePackIterator(dataset, pack, {}) outputs = list(dataset) self.assertEqual( outputs, [ [ {"id": 1}, {"id": 2}, ], [ {"id": 1}, {"id": 2}, {"id": 3}, ], ], ) @require_torch def test_pipeline_pack_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]]) # is_false Across batch dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]]) def test_pipeline_negative_device(self): # To avoid regressing, pipeline used to accept device=-1 classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1) expected_output = [{"generated_text": ANY(str)}] actual_output = classifier("Test input.") self.assertEqual(expected_output, actual_output) @slow @require_torch def test_load_default_pipelines_pt(self): import torch from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_tf def test_load_default_pipelines_tf(self): import tensorflow as tf from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() @slow @require_torch def test_load_default_pipelines_pt_table_qa(self): import torch set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator_indexed(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_tf @require_tensorflow_probability def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() def check_default_pipeline(self, task, framework, 
set_seed_fn, check_models_equal_fn): from transformers.pipelines import SUPPORTED_TASKS, pipeline task_dict = SUPPORTED_TASKS[task] # test to compare pipeline to manually loading the respective model model = None relevant_auto_classes = task_dict[framework] if len(relevant_auto_classes) == 0: # task has no default logger.debug(f"{task} in {framework} has no default") return # by default use first class auto_model_cls = relevant_auto_classes[0] # retrieve correct model ids if task == "translation": # special case for translation pipeline which has multiple languages model_ids = [] revisions = [] tasks = [] for translation_pair in task_dict["default"].keys(): model_id, revision = task_dict["default"][translation_pair]["model"][framework] model_ids.append(model_id) revisions.append(revision) tasks.append(task + f"_{'_to_'.join(translation_pair)}") else: # normal case - non-translation pipeline model_id, revision = task_dict["default"]["model"][framework] model_ids = [model_id] revisions = [revision] tasks = [task] # check for equality for model_id, revision, task in zip(model_ids, revisions, tasks): # load default model try: set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) except ValueError: # first auto class is possible not compatible with model, go to next model class auto_model_cls = relevant_auto_classes[1] set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) # load default pipeline set_seed_fn() default_pipeline = pipeline(task, framework=framework) # compare pipeline model with default model models_are_equal = check_models_equal_fn(default_pipeline.model, model) self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.") logger.debug(f"{task} in {framework} succeeded with {model_id}.") def check_models_equal_pt(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal def check_models_equal_tf(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.weights, model2.weights): if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5: models_are_equal = False return models_are_equal class CustomPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, text, maybe_arg=2): input_ids = self.tokenizer(text, return_tensors="pt") return input_ids def _forward(self, model_inputs): outputs = self.model(**model_inputs) return outputs def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() @is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() logger_ = transformers_logging.get_logger("transformers.pipelines.base") alias = "text-classification" # Get the original task, so we can restore it at the end. 
# (otherwise the subsequent tests in `TextClassificationPipelineTests` will fail) _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline) self.assertIn(f"{alias} is already registered", cm.out) finally: # restore PIPELINE_REGISTRY.supported_tasks[alias] = original_task def test_register_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "custom-text-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, default={"pt": "hf-internal-testing/tiny-random-distilbert"}, type="text", ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") self.assertEqual(task_def["impl"], PairClassificationPipeline) self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}}) # Clean registry for next tests. del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, ) classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert") # Clean registry as we won't need the pipeline to be in it for the rest to work.
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] with tempfile.TemporaryDirectory() as tmp_dir: classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (), "tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (), } }, ) # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=tmp_dir) new_classifier = pipeline(model=tmp_dir, trust_remote_code=True) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") self.assertEqual(new_classifier.task, "pair-classification") results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual( nested_simplify(results), {"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]}, ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify(results), [{"label": "LABEL_0", "score": 0.505}], ) @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): # Make sure we have cached the pipeline. _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): # Make sure we have cached the pipeline. 
pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") # For some reason scoping doesn't work if not using `self.` self.COUNT = 0 forward = pipe.model.forward def new_forward(*args, **kwargs): self.COUNT += 1 return forward(*args, **kwargs) pipe.model.forward = new_forward for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024): pass self.assertEqual(self.COUNT, 1) @require_torch @is_staging_test class DynamicPipelineTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-dynamic-pipeline") except HTTPError: pass def test_push_to_hub_dynamic_pipeline(self): from transformers import BertConfig, BertForSequenceClassification, BertTokenizer PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, ) config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertForSequenceClassification(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"{USER}/test-dynamic-pipeline", token=self._token) repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-pipeline", token=self._token) vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer) # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) repo.push_to_hub() # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=f"{USER}/test-dynamic-pipeline") new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") results = classifier("I hate you", second_text="I love you") new_results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual(nested_simplify(results), nested_simplify(new_results)) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline( "text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") new_results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results) )
transformers/tests/pipelines/test_pipelines_common.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_common.py", "repo_id": "transformers", "token_count": 14399 }
413
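As a companion to the custom-pipeline tests above, the following is a minimal, self-contained sketch of the pattern they exercise: subclass `Pipeline`, implement `_sanitize_parameters` / `preprocess` / `_forward` / `postprocess`, and register the class with `PIPELINE_REGISTRY`. The task name `pair-classification-demo` and the demo class are illustrative placeholders (the tests use their own `PairClassificationPipeline`); the tiny checkpoint is the same internal testing model referenced in the tests.

# Minimal sketch of the custom-pipeline registration pattern exercised above (illustrative names).
from transformers import AutoModelForSequenceClassification, Pipeline, pipeline
from transformers.pipelines import PIPELINE_REGISTRY


class PairClassificationDemoPipeline(Pipeline):
    def _sanitize_parameters(self, second_text=None, **kwargs):
        # Route the optional second sentence to preprocess; no forward/postprocess kwargs here.
        preprocess_kwargs = {}
        if second_text is not None:
            preprocess_kwargs["second_text"] = second_text
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0]
        best_class = logits.softmax(-1).argmax().item()
        return {"label": self.model.config.id2label[best_class], "logits": logits.tolist()}


PIPELINE_REGISTRY.register_pipeline(
    "pair-classification-demo",
    pipeline_class=PairClassificationDemoPipeline,
    pt_model=AutoModelForSequenceClassification,
)

classifier = pipeline("pair-classification-demo", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("I hate you", second_text="I love you"))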
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def test_small_model_pt(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", top_k=2) self.assertEqual( nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) outputs = text_classifier(["This is great !", "This is bad"], top_k=2) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier("This is great !", top_k=1) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) # Legacy behavior outputs = text_classifier("This is great !", return_all_scores=False) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=True) self.assertEqual( nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False) self.assertEqual( nested_simplify(outputs), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def test_accepts_torch_device(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch_device, ) outputs = 
text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @require_tf def test_small_model_tf(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @slow @require_torch def test_pt_bert(self): text_classifier = pipeline("text-classification") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) @slow @require_tf def test_tf_bert(self): text_classifier = pipeline("text-classification", framework="tf") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) def get_test_pipeline(self, model, tokenizer, processor): text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) return text_classifier, ["HuggingFace is in", "This is another test"] def run_pipeline_test(self, text_classifier, _): model = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 valid_inputs = "HuggingFace is in" outputs = text_classifier(valid_inputs) self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}]) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) valid_inputs = ["HuggingFace is in ", "Paris is in France"] outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) self.assertTrue(outputs[1]["label"] in model.config.id2label.values()) # Forcing to get all results with `top_k=None` # This is NOT the legacy format outputs = text_classifier(valid_inputs, top_k=None) N = len(model.config.id2label.values()) self.assertEqual( nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], ) valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, ) self.assertTrue(outputs["label"] in model.config.id2label.values()) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
invalid_input = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(ValueError): text_classifier(invalid_input) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]]) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
transformers/tests/pipelines/test_pipelines_text_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text_classification.py", "repo_id": "transformers", "token_count": 3566 }
414
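For orientation, here is a small usage sketch of the `text-classification` pipeline behaviors the tests above assert (single input, batching, `top_k`, and dict-style text pairs). It uses the same tiny internal-testing checkpoint as the tests, so the scores are close to uniform and only the output shapes are meaningful.

# Usage sketch of the text-classification behaviors covered by the tests above.
from transformers import pipeline

text_classifier = pipeline(
    task="text-classification",
    model="hf-internal-testing/tiny-random-distilbert",
    framework="pt",
)

# Single string -> a list with one {"label", "score"} dict.
print(text_classifier("This is great !"))

# Batch of strings -> one result per input.
print(text_classifier(["This is great !", "This is bad"]))

# top_k returns the k best labels per input; top_k=None returns all of them.
print(text_classifier("This is great !", top_k=2))

# A text pair is passed as a dict with "text" and "text_pair" keys, not as a nested list.
print(text_classifier({"text": "HuggingFace is in ", "text_pair": "Paris is in France"}))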
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_accelerate_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_accelerate_available(): from accelerate import PartialState from accelerate.logging import get_logger logger = get_logger(__name__) _ = PartialState() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only""" def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class BaseMixedInt8Test(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected) # Therefore here we use only bloom-1b7 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of the family.\n") MAX_NEW_TOKENS = 10 def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class MixedInt8Test(BaseMixedInt8Test): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors.
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_get_keys_to_not_convert_trust_remote_code(self): r""" Test the `get_keys_to_not_convert` function with `trust_remote_code` models. """ from accelerate import init_empty_weights from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained( model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) def test_get_keys_to_not_convert(self): r""" Test the `get_keys_to_not_convert` function. """ from accelerate import init_empty_weights from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) # The order of the keys does not matter, so we sort them before comparing, same for the other tests. self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "transformer.wte"].sort()) model_id = "Salesforce/blip2-opt-2.7b" config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec") with init_empty_weights(): model = Blip2ForConditionalGeneration(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["language_model.lm_head", "language_model.model.decoder.embed_tokens"].sort(), ) model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") with init_empty_weights(): model = OPTForCausalLM(config) self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "model.decoder.embed_tokens"].sort()) model_id = "roberta-large" config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9") with init_empty_weights(): model = AutoModelForMaskedLM.from_config(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["'roberta.embeddings.word_embeddings', 'lm_head', 'lm_head.decoder"].sort(), ) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_8bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() 
self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ import bitsandbytes as bnb quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"]) seq_classification_model = AutoModelForSequenceClassification.from_pretrained( "roberta-large-mnli", quantization_config=quantization_config ) self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8) self.assertTrue( isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt) ) self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear)) self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8) self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear)) self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_if_config_and_load_in_8bit(self): r""" Test that loading the model with the config and `load_in_8bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. Checks also if other models are casted correctly. 
""" with self.assertRaises(ValueError): # Tries with `str` self.model_8bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.float() with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_regression(self): r""" Test whether it is possible to serialize a model in 8-bit - using not safetensors """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model in 8-bit - sharded version. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Int8Params # last param of a base model should be a linear8bit module self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertIn(output_text, self.EXPECTED_OUTPUTS) def test_cpu_gpu_loading_random_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. 
""" device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. 
This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) class MixedInt8TestTraining(BaseMixedInt8Test): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class MixedInt8GPT2Test(MixedInt8Test): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a big fan of") EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a fan of the") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
transformers/tests/quantization/bnb/test_mixed_int8.py/0
{ "file_path": "transformers/tests/quantization/bnb/test_mixed_int8.py", "repo_id": "transformers", "token_count": 15531 }
415
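The mixed-int8 tests above all revolve around one loading pattern; the sketch below condenses it into a standalone snippet. It assumes a CUDA GPU with `bitsandbytes` and `accelerate` installed and reuses the `bigscience/bloom-1b7` checkpoint from the tests; it is a usage illustration, not part of the test suite.

# Condensed sketch of the 8-bit load-and-generate flow exercised by the tests above.
# Assumes a CUDA GPU with bitsandbytes and accelerate installed.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "bigscience/bloom-1b7"
tokenizer = AutoTokenizer.from_pretrained(model_name)

quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model_8bit = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quantization_config,
    device_map="auto",
)

# The 8-bit model should have a markedly smaller footprint than an fp16 copy of the same checkpoint.
print(f"memory footprint: {model_8bit.get_memory_footprint() / 2**30:.2f} GiB")

encoded_input = tokenizer("Hello my name is", return_tensors="pt").to(model_8bit.device)
output_sequences = model_8bit.generate(**encoded_input, max_new_tokens=10)
print(tokenizer.decode(output_sequences[0], skip_special_tokens=True))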