Dataset schema: `text` (string, lengths 7 – 318k), `id` (string, lengths 14 – 166), `metadata` (dict), `__index_level_0__` (int64, 0 – 439)
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert LUKE checkpoint."""

import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
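For reference, `load_entity_vocab` above assumes a tab-separated file with exactly two columns per line, an entity title and a second (ignored) field, where the title's id is its line number. A minimal sketch of that layout, using illustrative entries (real files also contain Wikipedia entity titles):

```python
# Sketch of the entity_vocab.tsv layout parsed by load_entity_vocab():
# each line is "<entity title>\t<count>"; the index is the line number.
sample_lines = ["[PAD]\t0", "[UNK]\t0", "[MASK]\t0", "[MASK2]\t0"]

entity_vocab = {}
for index, line in enumerate(sample_lines):
    title, _ = line.rstrip().split("\t")  # exactly two tab-separated fields expected
    entity_vocab[title] = index

assert entity_vocab["[MASK]"] == 2 and entity_vocab["[MASK2]"] == 3
```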
transformers/src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2877 }
327
# coding=utf-8
# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Marian model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MarianModel`]. It is used to instantiate a
    Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Marian
    [Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 58101):
            Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MarianModel`] or [`TFMarianModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 0):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Examples:

    ```python
    >>> from transformers import MarianModel, MarianConfig

    >>> # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration
    >>> configuration = MarianConfig()

    >>> # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration
    >>> model = MarianModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    # Copied from BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering
    # We renamed this function because Marian models do not have a sequence classification or question answering head
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
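A short sketch of how the ONNX config's dummy-input generation above is typically driven; the checkpoint id is illustrative, and with the default dynamic axes (-1) the helper falls back to a fixed batch of 2 and sequence of 8:

```python
from transformers import AutoTokenizer, MarianConfig
from transformers.models.marian.configuration_marian import MarianOnnxConfig
from transformers.utils import TensorType

# Any Marian checkpoint with a tokenizer works the same way; this one is just an example.
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
config = MarianConfig()

onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(sorted(dummy_inputs.keys()))
# ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']
```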
transformers/src/transformers/models/marian/configuration_marian.py/0
{ "file_path": "transformers/src/transformers/models/marian/configuration_marian.py", "repo_id": "transformers", "token_count": 8188 }
328
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from argparse import ArgumentParser
from dataclasses import dataclass
from pathlib import Path
from pprint import pformat
from typing import Any, Dict, Iterator, List, Set, Tuple

import requests
import torch
import torchvision.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
from huggingface_hub import hf_hub_download
from PIL import Image
from torch import Tensor, nn

from transformers import (
    Mask2FormerConfig,
    Mask2FormerForUniversalSegmentation,
    Mask2FormerImageProcessor,
    Mask2FormerModel,
    SwinConfig,
)
from transformers.models.mask2former.modeling_mask2former import (
    Mask2FormerForUniversalSegmentationOutput,
    Mask2FormerModelOutput,
)
from transformers.utils import logging


StateDict = Dict[str, Tensor]

logging.set_verbosity_info()
logger = logging.get_logger()

torch.manual_seed(0)


class TrackedStateDict:
    def __init__(self, to_track: Dict):
        """This class "tracks" a python dictionary by keeping track of which item is accessed.

        Args:
            to_track (Dict): The dictionary we wish to track
        """
        self.to_track = to_track
        self._seen: Set[str] = set()

    def __getitem__(self, key: str) -> Any:
        return self.to_track[key]

    def __setitem__(self, key: str, item: Any):
        self._seen.add(key)
        self.to_track[key] = item

    def diff(self) -> List[str]:
        """This method returns a set difference between the keys in the tracked state dict and the ones we have
        accessed so far. This is an effective method to check if we have updated all the keys.

        Returns:
            List[str]: List of keys not yet updated
        """
        return set(self.to_track.keys()) - self._seen

    def copy(self) -> Dict:
        # proxy the call to the internal dictionary
        return self.to_track.copy()


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    img_data = requests.get(url, stream=True).raw
    im = Image.open(img_data)
    return im


@dataclass
class Args:
    """Fake command line arguments needed by mask2former/detectron implementation"""

    config_file: str


def setup_cfg(args: Args):
    # load config from file and command-line arguments
    cfg = get_cfg()
    add_deeplab_config(cfg)
    add_maskformer2_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.freeze()
    return cfg


class OriginalMask2FormerConfigToOursConverter:
    def __call__(self, original_config: object) -> Mask2FormerConfig:
        model = original_config.MODEL

        repo_id = "huggingface/label-files"
        if model.SEM_SEG_HEAD.NUM_CLASSES == 847:
            filename = "mask2former-ade20k-full-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 150:
            filename = "ade20k-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 80:
            filename = "coco-detection-mmdet-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 171:
            filename = "mask2former-coco-stuff-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 133:
            filename = "coco-panoptic-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 19:
            filename = "cityscapes-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 8:
            filename = "cityscapes-instance-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 65:
            filename = "mapillary-vistas-id2label.json"

        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        label2id = {label: idx for idx, label in id2label.items()}

        if model.SWIN.EMBED_DIM == 96:
            backbone_config = SwinConfig.from_pretrained(
                "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
            )
        elif model.SWIN.EMBED_DIM == 128:
            backbone_config = SwinConfig(
                embed_dim=128,
                window_size=12,
                depths=(2, 2, 18, 2),
                num_heads=(4, 8, 16, 32),
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        elif model.SWIN.EMBED_DIM == 192:
            backbone_config = SwinConfig.from_pretrained(
                "microsoft/swin-large-patch4-window12-384", out_features=["stage1", "stage2", "stage3", "stage4"]
            )
        else:
            raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!")

        backbone_config.drop_path_rate = model.SWIN.DROP_PATH_RATE
        backbone_config.attention_probs_dropout_prob = model.SWIN.ATTN_DROP_RATE
        backbone_config.depths = model.SWIN.DEPTHS

        config: Mask2FormerConfig = Mask2FormerConfig(
            ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE,
            num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
            num_queries=model.MASK_FORMER.NUM_OBJECT_QUERIES,
            no_object_weight=model.MASK_FORMER.NO_OBJECT_WEIGHT,
            class_weight=model.MASK_FORMER.CLASS_WEIGHT,
            mask_weight=model.MASK_FORMER.MASK_WEIGHT,
            dice_weight=model.MASK_FORMER.DICE_WEIGHT,
            train_num_points=model.MASK_FORMER.TRAIN_NUM_POINTS,
            oversample_ratio=model.MASK_FORMER.OVERSAMPLE_RATIO,
            importance_sample_ratio=model.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
            init_std=0.02,
            init_xavier_std=1.0,
            use_auxiliary_loss=model.MASK_FORMER.DEEP_SUPERVISION,
            feature_strides=[4, 8, 16, 32],
            backbone_config=backbone_config,
            id2label=id2label,
            label2id=label2id,
            feature_size=model.SEM_SEG_HEAD.CONVS_DIM,
            mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM,
            hidden_dim=model.MASK_FORMER.HIDDEN_DIM,
            encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS,
            encoder_feedforward_dim=1024,
            decoder_layers=model.MASK_FORMER.DEC_LAYERS,
            num_attention_heads=model.MASK_FORMER.NHEADS,
            dropout=model.MASK_FORMER.DROPOUT,
            dim_feedforward=model.MASK_FORMER.DIM_FEEDFORWARD,
            pre_norm=model.MASK_FORMER.PRE_NORM,
            enforce_input_proj=model.MASK_FORMER.ENFORCE_INPUT_PROJ,
            common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE,
        )
        return config


class OriginalMask2FormerConfigToImageProcessorConverter:
    def __call__(self, original_config: object) -> Mask2FormerImageProcessor:
        model = original_config.MODEL
        model_input = original_config.INPUT

        return Mask2FormerImageProcessor(
            image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
            image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
            size=model_input.MIN_SIZE_TEST,
            max_size=model_input.MAX_SIZE_TEST,
            num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
            ignore_index=model.SEM_SEG_HEAD.IGNORE_VALUE,
            size_divisibility=32,
        )


class OriginalMask2FormerCheckpointToOursConverter:
    def __init__(self, original_model: nn.Module, config: Mask2FormerConfig):
        self.original_model = original_model
        self.config = config

    def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
        for src_key, dst_key in renamed_keys:
            dst_state_dict[dst_key] = src_state_dict.pop(src_key)

    def replace_maskformer_swin_backbone(
        self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig
    ):
        dst_prefix: str = "pixel_level_module.encoder"
        src_prefix: str = "backbone"

        renamed_keys = [
            (
                f"{src_prefix}.patch_embed.proj.weight",
                f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight",
            ),
            (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"),
            (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"),
            (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"),
        ]
        num_layers = len(config.backbone_config.depths)
        for layer_idx in range(num_layers):
            for block_idx in range(config.backbone_config.depths[layer_idx]):
                renamed_keys.extend(
                    [  # src, dst
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
                        ),
                    ]
                )
                # now we need to handle the attentions
                # read in weights + bias of input projection layer of cross-attention
                src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
                src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]

                size = src_att_weight.shape[0]
                offset = size // 3
                dst_state_dict[
                    f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
                ] = src_att_weight[:offset, :]
                dst_state_dict[
                    f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
                ] = src_att_bias[:offset]

                dst_state_dict[
                    f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
                ] = src_att_weight[offset : offset * 2, :]
                dst_state_dict[
                    f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
                ] = src_att_bias[offset : offset * 2]

                dst_state_dict[
                    f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
                ] = src_att_weight[-offset:, :]
                dst_state_dict[
                    f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
                ] = src_att_bias[-offset:]

                # let's pop them
                src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
                src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
                # proj
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
                        ),
                    ]
                )
                # second norm
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
                        ),
                    ]
                )
                # mlp
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
                        ),
                    ]
                )
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
                        )
                    ]
                )

            if layer_idx < num_layers - 1:
                # patch merging
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
                            f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias",
                        ),
                    ]
                )

            # hidden states norms
            renamed_keys.extend(
                [
                    (
                        f"{src_prefix}.norm{layer_idx}.weight",
                        f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight",
                    ),
                    (
                        f"{src_prefix}.norm{layer_idx}.bias",
                        f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias",
                    ),
                ]
            )
        self.pop_all(renamed_keys, dst_state_dict, src_state_dict)

    def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig):
        dst_prefix: str = "pixel_level_module.encoder"
        src_prefix: str = "backbone"

        renamed_keys = [
            (
                f"{src_prefix}.patch_embed.proj.weight",
                f"{dst_prefix}.embeddings.patch_embeddings.projection.weight",
            ),
            (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"),
            (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"),
            (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"),
        ]

        for layer_idx in range(len(config.backbone_config.depths)):
            for block_idx in range(config.backbone_config.depths[layer_idx]):
                renamed_keys.extend(
                    [  # src, dst
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
                        ),
                    ]
                )
                # now we need to handle the attentions
                # read in weights + bias of input projection layer of cross-attention
                src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
                src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]

                size = src_att_weight.shape[0]
                offset = size // 3
                dst_state_dict[
                    f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
                ] = src_att_weight[:offset, :]
                dst_state_dict[
                    f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
                ] = src_att_bias[:offset]

                dst_state_dict[
                    f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
                ] = src_att_weight[offset : offset * 2, :]
                dst_state_dict[
                    f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
                ] = src_att_bias[offset : offset * 2]

                dst_state_dict[
                    f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
                ] = src_att_weight[-offset:, :]
                dst_state_dict[
                    f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
                ] = src_att_bias[-offset:]

                # let's pop them
                src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
                src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
                # proj
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
                        ),
                    ]
                )
                # second norm
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
                        ),
                    ]
                )
                # mlp
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
                        ),
                    ]
                )
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
                        )
                    ]
                )

            if layer_idx < 3:
                # patch merging
                renamed_keys.extend(
                    [
                        (
                            f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight",
                        ),
                        (
                            f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
                            f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias",
                        ),
                    ]
                )

            # hidden states norms
            renamed_keys.extend(
                [
                    (
                        f"{src_prefix}.norm{layer_idx}.weight",
                        f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight",
                    ),
                    (
                        f"{src_prefix}.norm{layer_idx}.bias",
                        f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias",
                    ),
                ]
            )
        self.pop_all(renamed_keys, dst_state_dict, src_state_dict)

    # Backbone + Pixel Decoder
    def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
        dst_prefix: str = "pixel_level_module.decoder"
        src_prefix: str = "sem_seg_head.pixel_decoder"

        self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)

        def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
            return [
                (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
                (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
            ]

        def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
            self_attn_keys = []
            self_attn_keys.extend(
                rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights")
            )
            self_attn_keys.extend(
                rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj")
            )
            self_attn_keys.extend(
                rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets")
            )
            self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj"))
            return self_attn_keys

        def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
            encoder_keys = []
            encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1"))
            encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2"))
            encoder_keys.extend(
                rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm")
            )
            encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm"))
            encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn"))
            return encoder_keys

        # convolution layer for final features
        renamed_keys = [
            (f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"),
            (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"),
            (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"),
        ]

        renamed_keys.extend(
            [
                (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"),
                (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"),
                (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"),
            ]
        )

        # proj layers
        for i in range(3):
            for j in range(2):
                renamed_keys.extend(
                    [
                        (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"),
                        (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"),
                    ]
                )

        renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")])

        # layers
        for layer_idx in range(self.config.encoder_layers):
            renamed_keys.extend(
                rename_keys_for_encoder_layer(
                    f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}"
                )
            )

        # proj
        renamed_keys.extend(
            [
                (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
                (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
            ]
        )
        self.pop_all(renamed_keys, dst_state_dict, src_state_dict)

    # Transformer Decoder
    def rename_keys_in_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
        dst_prefix: str = "transformer_module.decoder"
        src_prefix: str = "sem_seg_head.predictor"

        rename_keys = []
        for i in range(self.config.decoder_layers - 1):
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.weight",
                    f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.bias",
                    f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_self_attention_layers.{i}.norm.weight",
                    f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_self_attention_layers.{i}.norm.bias",
                    f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_weight",
                    f"{dst_prefix}.layers.{i}.cross_attn.in_proj_weight",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_bias",
                    f"{dst_prefix}.layers.{i}.cross_attn.in_proj_bias",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.weight",
                    f"{dst_prefix}.layers.{i}.cross_attn.out_proj.weight",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.bias",
                    f"{dst_prefix}.layers.{i}.cross_attn.out_proj.bias",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_cross_attention_layers.{i}.norm.weight",
                    f"{dst_prefix}.layers.{i}.cross_attn_layer_norm.weight",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_cross_attention_layers.{i}.norm.bias",
                    f"{dst_prefix}.layers.{i}.cross_attn_layer_norm.bias",
                )
            )
            rename_keys.append(
                (f"{src_prefix}.transformer_ffn_layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight")
            )
            rename_keys.append(
                (f"{src_prefix}.transformer_ffn_layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias")
            )
            rename_keys.append(
                (f"{src_prefix}.transformer_ffn_layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight")
            )
            rename_keys.append(
                (f"{src_prefix}.transformer_ffn_layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias")
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_ffn_layers.{i}.norm.weight",
                    f"{dst_prefix}.layers.{i}.final_layer_norm.weight",
                )
            )
            rename_keys.append(
                (
                    f"{src_prefix}.transformer_ffn_layers.{i}.norm.bias",
                    f"{dst_prefix}.layers.{i}.final_layer_norm.bias",
                )
            )

        return rename_keys

    def replace_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
        dst_prefix: str = "transformer_module.decoder"
        src_prefix: str = "sem_seg_head.predictor"

        renamed_keys = self.rename_keys_in_masked_attention_decoder(dst_state_dict, src_state_dict)

        # add more
        renamed_keys.extend(
            [
                (f"{src_prefix}.decoder_norm.weight", f"{dst_prefix}.layernorm.weight"),
                (f"{src_prefix}.decoder_norm.bias", f"{dst_prefix}.layernorm.bias"),
            ]
        )

        mlp_len = 3
        for i in range(mlp_len):
            renamed_keys.extend(
                [
                    (
                        f"{src_prefix}.mask_embed.layers.{i}.weight",
                        f"{dst_prefix}.mask_predictor.mask_embedder.{i}.0.weight",
                    ),
                    (
                        f"{src_prefix}.mask_embed.layers.{i}.bias",
                        f"{dst_prefix}.mask_predictor.mask_embedder.{i}.0.bias",
                    ),
                ]
            )

        self.pop_all(renamed_keys, dst_state_dict, src_state_dict)

    def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
        dst_prefix: str = "transformer_module.decoder.layers"
        src_prefix: str = "sem_seg_head.predictor"
        for i in range(self.config.decoder_layers - 1):
            # read in weights + bias of input projection layer of self-attention
            in_proj_weight = src_state_dict.pop(
                f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight"
            )
            in_proj_bias = src_state_dict.pop(
                f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias"
            )
            # next, add query, keys and values (in that order) to the state dict
            dst_state_dict[f"{dst_prefix}.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
            dst_state_dict[f"{dst_prefix}.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
            dst_state_dict[f"{dst_prefix}.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
            dst_state_dict[f"{dst_prefix}.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
            dst_state_dict[f"{dst_prefix}.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
            dst_state_dict[f"{dst_prefix}.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
        dst_prefix: str = "transformer_module"
        src_prefix: str = "sem_seg_head.predictor"

        self.replace_masked_attention_decoder(dst_state_dict, src_state_dict)

        renamed_keys = [
            (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
            (f"{src_prefix}.query_feat.weight", f"{dst_prefix}.queries_features.weight"),
            (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"),
        ]

        self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
        self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)

    def replace_universal_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
        dst_prefix: str = ""
        src_prefix: str = "sem_seg_head.predictor"

        renamed_keys = [
            (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"),
            (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"),
        ]

        logger.info(f"Replacing keys {pformat(renamed_keys)}")
        self.pop_all(renamed_keys, dst_state_dict, src_state_dict)

    def convert(self, mask2former: Mask2FormerModel) -> Mask2FormerModel:
        dst_state_dict = TrackedStateDict(mask2former.state_dict())
        src_state_dict = self.original_model.state_dict()

        self.replace_pixel_module(dst_state_dict, src_state_dict)
        self.replace_transformer_module(dst_state_dict, src_state_dict)

        logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}")
        logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}")
        logger.info("🙌 Done")

        state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track.keys()}
        mask2former.load_state_dict(state_dict)
        return mask2former

    def convert_universal_segmentation(
        self, mask2former: Mask2FormerForUniversalSegmentation
    ) -> Mask2FormerForUniversalSegmentation:
        dst_state_dict = TrackedStateDict(mask2former.state_dict())
        src_state_dict = self.original_model.state_dict()

        self.replace_universal_segmentation_module(dst_state_dict, src_state_dict)

        state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track.keys()}
        mask2former.load_state_dict(state_dict)

        return mask2former

    @staticmethod
    def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]:
        checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl")

        for checkpoint in checkpoints:
            logger.info(f"💪 Converting {checkpoint.stem}")
            # find associated config file

            # dataset_name e.g. 'coco'
            dataset_name = checkpoint.parents[2].stem
            if dataset_name == "ade":
                dataset_name = dataset_name.replace("ade", "ade20k")

            # task type e.g. 'instance-segmentation'
            segmentation_task = checkpoint.parents[1].stem

            # config file corresponding to checkpoint
            config_file_name = f"{checkpoint.parents[0].stem}.yaml"

            config: Path = config_dir / dataset_name / segmentation_task / "swin" / config_file_name
            yield config, checkpoint


def test(
    original_model,
    our_model: Mask2FormerForUniversalSegmentation,
    image_processor: Mask2FormerImageProcessor,
    tolerance: float,
):
    with torch.no_grad():
        original_model = original_model.eval()
        our_model = our_model.eval()

        im = prepare_img()
        x = image_processor(images=im, return_tensors="pt")["pixel_values"]

        original_model_backbone_features = original_model.backbone(x.clone())
        our_model_output: Mask2FormerModelOutput = our_model.model(x.clone(), output_hidden_states=True)

        # Test backbone
        for original_model_feature, our_model_feature in zip(
            original_model_backbone_features.values(), our_model_output.encoder_hidden_states
        ):
            assert torch.allclose(
                original_model_feature, our_model_feature, atol=tolerance
            ), "The backbone features are not the same."

        # Test pixel decoder
        mask_features, _, multi_scale_features = original_model.sem_seg_head.pixel_decoder.forward_features(
            original_model_backbone_features
        )

        for original_model_feature, our_model_feature in zip(
            multi_scale_features, our_model_output.pixel_decoder_hidden_states
        ):
            assert torch.allclose(
                original_model_feature, our_model_feature, atol=tolerance
            ), "The pixel decoder features are not the same"

        # Let's test the full model
        tr_complete = T.Compose(
            [T.Resize((384, 384)), T.ToTensor()],
        )
        y = (tr_complete(im) * 255.0).to(torch.int).float()

        # modify original Mask2Former code to return mask and class logits
        original_class_logits, original_mask_logits = original_model([{"image": y.clone().squeeze(0)}])

        our_model_out: Mask2FormerForUniversalSegmentationOutput = our_model(x.clone())
        our_mask_logits = our_model_out.masks_queries_logits
        our_class_logits = our_model_out.class_queries_logits

        assert original_mask_logits.shape == our_mask_logits.shape, "Output masks shapes are not matching."
        assert original_class_logits.shape == our_class_logits.shape, "Output class logits shapes are not matching."
        assert torch.allclose(
            original_class_logits, our_class_logits, atol=tolerance
        ), "The class logits are not the same."
        assert torch.allclose(
            original_mask_logits, our_mask_logits, atol=tolerance
        ), "The predicted masks are not the same."

        logger.info("✅ Test passed!")


def get_model_name(checkpoint_file: Path):
    # model_name_raw is something like maskformer2_swin_small_bs16_50ep
    model_name_raw: str = checkpoint_file.parents[0].stem

    # `segmentation_task_name` must be one of the following: `instance-segmentation`, `panoptic-segmentation`, `semantic-segmentation`
    segmentation_task_name: str = checkpoint_file.parents[1].stem
    if segmentation_task_name not in ["instance-segmentation", "panoptic-segmentation", "semantic-segmentation"]:
        raise ValueError(
            f"{segmentation_task_name} must be wrong since acceptable values are: instance-segmentation,"
            " panoptic-segmentation, semantic-segmentation."
        )

    # dataset name must be one of the following: `coco`, `ade`, `cityscapes`, `mapillary-vistas`
    dataset_name: str = checkpoint_file.parents[2].stem
    if dataset_name not in ["coco", "ade", "cityscapes", "mapillary-vistas"]:
        raise ValueError(
            f"{dataset_name} must be wrong since we didn't find 'coco' or 'ade' or 'cityscapes' or 'mapillary-vistas'"
            " in it "
        )

    backbone = "swin"
    backbone_types = ["tiny", "small", "base_IN21k", "base", "large"]
    backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0].replace("_", "-")

    model_name = f"mask2former-{backbone}-{backbone_type}-{dataset_name}-{segmentation_task_name.split('-')[0]}"

    return model_name


if __name__ == "__main__":
    parser = ArgumentParser(
        description="Command line to convert the original mask2formers (with swin backbone) to our implementations."
    )

    parser.add_argument(
        "--checkpoints_dir",
        type=Path,
        help=(
            "A directory containing the model's checkpoints. The directory has to have the following structure:"
            " <DIR_NAME>/<DATASET_NAME>/<SEGMENTATION_TASK_NAME>/<CONFIG_NAME>.pkl"
        ),
    )
    parser.add_argument(
        "--configs_dir",
        type=Path,
        help=(
            "A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
            " structure: <DIR_NAME>/<DATASET_NAME>/<SEGMENTATION_TASK_NAME>/<CONFIG_NAME>.yaml"
        ),
    )
    parser.add_argument(
        "--mask2former_dir",
        required=True,
        type=Path,
        help=(
            "A path to Mask2Former's original implementation directory. You can download from here:"
            " https://github.com/facebookresearch/Mask2Former"
        ),
    )

    args = parser.parse_args()

    checkpoints_dir: Path = args.checkpoints_dir
    config_dir: Path = args.configs_dir
    mask2former_dir: Path = args.mask2former_dir
    # append the parent of the Mask2Former dir to the path, so its package can be imported
    sys.path.append(str(mask2former_dir.parent))
    # import original Mask2Former config and model from original source code repo
    from Mask2Former.mask2former.config import add_maskformer2_config
    from Mask2Former.mask2former.maskformer_model import MaskFormer as OriginalMask2Former

    for config_file, checkpoint_file in OriginalMask2FormerCheckpointToOursConverter.using_dirs(
        checkpoints_dir, config_dir
    ):
        model_name = get_model_name(checkpoint_file)
        image_processor = OriginalMask2FormerConfigToImageProcessorConverter()(
            setup_cfg(Args(config_file=config_file))
        )
        image_processor.size = {"height": 384, "width": 384}

        original_config = setup_cfg(Args(config_file=config_file))
        mask2former_kwargs = OriginalMask2Former.from_config(original_config)
        original_model = OriginalMask2Former(**mask2former_kwargs).eval()

        DetectionCheckpointer(original_model).load(str(checkpoint_file))

        config: Mask2FormerConfig = OriginalMask2FormerConfigToOursConverter()(original_config)
        mask2former = Mask2FormerModel(config=config).eval()

        converter = OriginalMask2FormerCheckpointToOursConverter(original_model, config)
        mask2former = converter.convert(mask2former)

        mask2former_for_segmentation = Mask2FormerForUniversalSegmentation(config=config).eval()
        mask2former_for_segmentation.model = mask2former

        mask2former_for_segmentation = converter.convert_universal_segmentation(mask2former_for_segmentation)

        tolerance = 3e-1
        high_tolerance_models = [
            "mask2former-swin-base-IN21k-coco-instance",
            "mask2former-swin-base-coco-instance",
            "mask2former-swin-small-cityscapes-semantic",
        ]

        if model_name in high_tolerance_models:
            tolerance = 3e-1

        logger.info(f"🪄 Testing {model_name}...")
        test(original_model, mask2former_for_segmentation, image_processor, tolerance)
        logger.info(f"🪄 Pushing {model_name} to hub...")

        image_processor.push_to_hub(model_name)
        mask2former_for_segmentation.push_to_hub(model_name)
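The fused-projection splitting pattern used throughout the converter (Swin's `attn.qkv` and the decoder's `in_proj_weight`) boils down to slicing a stacked `[query; key; value]` matrix into three equal chunks. A minimal self-contained sketch with a toy hidden size (the decoder above hard-codes 256):

```python
import torch

# Toy fused projection: rows are stacked [query; key; value] blocks.
hidden_size = 4  # illustrative only
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b, k_b, v_b = in_proj_bias[:hidden_size], in_proj_bias[hidden_size : 2 * hidden_size], in_proj_bias[-hidden_size:]

# Re-concatenating the chunks recovers the original fused tensor.
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)
```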
transformers/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 24038 }
329
# coding=utf-8
# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Mixtral model."""
import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...modeling_attn_mask_utils import (
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)
from ...modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import is_torch_greater_or_equal_than_1_13
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from ...utils.import_utils import is_torch_fx_available
from .configuration_mixtral import MixtralConfig


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)

# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
# It means that the function will not be traced through and simply appear as a node in the graph.
if is_torch_fx_available():
    if not is_torch_greater_or_equal_than_1_13:
        import torch.fx

    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MixtralConfig"


def load_balancing_loss_func(
    gate_logits: torch.Tensor,
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> float:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]):
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        attention_mask (`torch.Tensor`, None):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.
        num_experts (`int`, *optional*):
            Number of experts

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mixtral
class MixtralRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MixtralRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mixtral
class MixtralRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace`
work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) # Copied from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Mixtral class MixtralAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer and "Generating Long Sequences with Sparse Transformers". """ def __init__(self, config: MixtralConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.attention_dropout = config.attention_dropout if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = MixtralRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. 
If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Mixtral class MixtralFlashAttention2(MixtralAttention): """ Mixtral flash attention module. This module inherits from `MixtralAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
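        # Illustration (hypothetical shapes, added for clarity): with q_len=2 and
        # kv_len=4, a bottom-right aligned causal mask treats the queries as the
        # last two positions of the key sequence,
        #     [[1, 1, 1, 0],
        #      [1, 1, 1, 1]]
        # whereas a top-left aligned mask would produce
        #     [[1, 0, 0, 0],
        #      [1, 1, 0, 0]].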
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ): if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) # overwrite attention_mask with padding_mask attention_mask = kwargs.pop("padding_mask") bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) # Because the input can be padded, the absolute sequence length depends on the max position id. rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1 cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) use_sliding_windows = ( _flash_supports_window_size and getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window ) if not _flash_supports_window_size: logger.warning_once( "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" " make sure to upgrade flash-attn library." 
) if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 if ( getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window and cache_has_contents ): slicing_tokens = 1 - self.config.sliding_window past_key = past_key_value[self.layer_idx][0] past_value = past_key_value[self.layer_idx][1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" f" {past_key.shape}" ) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) # Reashape to the expected shape for Flash Attention query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, use_sliding_windows=use_sliding_windows, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, use_sliding_windows=False, ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. 
Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) use_sliding_windows (`bool`, *optional*): Whether to activate sliding window attention. """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens if not use_sliding_windows: attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) else: attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: if not use_sliding_windows: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, ) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) return attn_output def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape # On the first iteration we need to properly re-create the padding mask # by slicing it on the proper place if kv_seq_len != attention_mask.shape[-1]: attention_mask_num_tokens = attention_mask.shape[-1] attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( 
batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) # Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Mixtral class MixtralSdpaAttention(MixtralAttention): """ Mixtral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `MixtralAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from MixtralAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "MixtralModel is using MixtralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. 
if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.attention_dropout if self.training else 0.0, # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. is_causal=self.is_causal and attention_mask is None and q_len > 1, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value MIXTRAL_ATTENTION_CLASSES = { "eager": MixtralAttention, "flash_attention_2": MixtralFlashAttention2, "sdpa": MixtralSdpaAttention, } class MixtralBlockSparseTop2MLP(nn.Module): def __init__(self, config: MixtralConfig): super().__init__() self.ffn_dim = config.intermediate_size self.hidden_dim = config.hidden_size self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False) self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states): current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states) current_hidden_states = self.w2(current_hidden_states) return current_hidden_states class MixtralBLockSparseTop2MLP(MixtralBlockSparseTop2MLP): def __init__(self, *args, **kwargs): logger.warning_once( "MixtralBLockSparseTop2MLP is deprecated by MixtralBlockSparseTop2MLP and will be removed in v4.40." ) super().__init__(*args, **kwargs) class MixtralSparseMoeBlock(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accomodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. 
""" def __init__(self, config): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok # gating self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False) self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)]) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ """ batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) # router_logits: (batch * sequence_length, n_experts) router_logits = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) # we cast back to the input dtype routing_weights = routing_weights.to(hidden_states.dtype) final_hidden_states = torch.zeros( (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device ) # One hot encode the selected experts to create an expert mask # this will be used to easily index which expert is going to be sollicitated expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) # Loop over all available experts in the model and perform the computation on each expert for expert_idx in range(self.num_experts): expert_layer = self.experts[expert_idx] idx, top_x = torch.where(expert_mask[expert_idx]) if top_x.shape[0] == 0: continue # in torch it is faster to index using lists than torch tensors top_x_list = top_x.tolist() idx_list = idx.tolist() # Index the correct hidden states and compute the expert hidden state for # the current expert. We need to make sure to multiply the output hidden # states by `routing_weights` on the corresponding tokens (top-1 and top-2) current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim) current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None] # However `index_add_` only support torch tensors for indexing so we'll use # the `top_x` tensor here. final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) return final_hidden_states, router_logits class MixtralDecoderLayer(nn.Module): def __init__(self, config: MixtralConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MIXTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.block_sparse_moe = MixtralSparseMoeBlock(config) self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, output_router_logits: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" ) """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states, router_logits = self.block_sparse_moe(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) if output_router_logits: outputs += (router_logits,) return outputs MIXTRAL_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MixtralConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" @add_start_docstrings( "The bare Mixtral Model outputting raw hidden-states without any specific head on top.", MIXTRAL_START_DOCSTRING, ) # Copied from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->Mixtral class MixtralPreTrainedModel(PreTrainedModel): config_class = MixtralConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MixtralDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() MIXTRAL_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Mixtral Model outputting raw hidden-states without any specific head on top.", MIXTRAL_START_DOCSTRING, ) # Copied from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->MIXTRAL,Mistral->Mixtral class MixtralModel(MixtralPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MixtralDecoderLayer`] Args: config: MixtralConfig """ def __init__(self, config: MixtralConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [MixtralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Ignore copy @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MoeModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and 
decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = 0 if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache: use_legacy_cache = not isinstance(past_key_values, Cache) if use_legacy_cache: past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_usable_length(seq_length) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: is_padding_right = attention_mask[:, -1].sum().item() != batch_size if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of Mixtral. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. " ) if self._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self._attn_implementation == "sdpa" and not output_attentions: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. 
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window=self.config.sliding_window, ) hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_logits = () if output_router_logits else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, output_router_logits, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, output_router_logits=output_router_logits, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) if output_router_logits: all_router_logits += (layer_outputs[-1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] if v is not None ) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits, ) class MixtralForCausalLM(MixtralPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = MixtralModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.router_aux_loss_coef = config.router_aux_loss_coef self.num_experts = config.num_local_experts self.num_experts_per_tok = config.num_experts_per_tok # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) # Ignore copy def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, 
return_dict: Optional[bool] = None, ) -> Union[Tuple, MoeCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, MixtralForCausalLM >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device if not return_dict: output = (logits,) + outputs[1:] if output_router_logits: output = (aux_loss,) + output return (loss,) + output if loss is not None else output return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): # Omit tokens covered by past_key_values if past_key_values is not None: if isinstance(past_key_values, Cache): cache_length = past_key_values.get_seq_length() past_length = past_key_values.seen_tokens max_cache_length = 
past_key_values.get_max_length() else: cache_length = past_length = past_key_values[0][0].shape[2] max_cache_length = None # Keep only the unprocessed tokens: # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as # input) if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard # input_ids based on the past_length. elif past_length < input_ids.shape[1]: input_ids = input_ids[:, past_length:] # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. if ( max_cache_length is not None and attention_mask is not None and cache_length + input_ids.shape[1] > max_cache_length ): attention_mask = attention_mask[:, -max_cache_length:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( """ The Mixtral Model transformer with a sequence classification head on top (linear layer). [`MixtralForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). 
""", MIXTRAL_START_DOCSTRING, ) # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mixtral, LLAMA->MIXTRAL class MixtralForSequenceClassification(MixtralPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = MixtralModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = 
BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
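

# ---------------------------------------------------------------------------
# Usage sketch (not part of the upstream module, added for illustration): a
# minimal, self-contained check that the block-sparse top-2 dispatch in
# `MixtralSparseMoeBlock` is numerically equivalent to a dense weighted sum
# over each token's selected experts, plus a toy call of
# `load_balancing_loss_func`. All sizes below are made up for the demo.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)
    num_experts, top_k, hidden_dim, num_tokens = 4, 2, 8, 5
    hidden = torch.randn(num_tokens, hidden_dim)
    gate = nn.Linear(hidden_dim, num_experts, bias=False)
    experts = [nn.Linear(hidden_dim, hidden_dim, bias=False) for _ in range(num_experts)]

    routing = F.softmax(gate(hidden), dim=-1, dtype=torch.float)
    topk_w, topk_idx = torch.topk(routing, top_k, dim=-1)
    topk_w = topk_w / topk_w.sum(dim=-1, keepdim=True)  # renormalize over the top-k, as in the block above

    # Dense reference: each token queries its two selected experts directly.
    dense = torch.stack(
        [
            sum(topk_w[t, k] * experts[topk_idx[t, k]](hidden[t]) for k in range(top_k))
            for t in range(num_tokens)
        ]
    )

    # Sparse dispatch, mirroring the expert loop in `MixtralSparseMoeBlock.forward`:
    # gather only the tokens routed to each expert and scatter-add the results back.
    sparse = torch.zeros_like(hidden)
    expert_mask = F.one_hot(topk_idx, num_classes=num_experts).permute(2, 1, 0)
    for e in range(num_experts):
        k_slot, tok = torch.where(expert_mask[e])
        if tok.numel() == 0:
            continue
        sparse.index_add_(0, tok, experts[e](hidden[tok]) * topk_w[tok, k_slot, None])
    assert torch.allclose(dense, sparse, atol=1e-5)

    # Toy auxiliary-loss call: a tuple of per-layer router logits, as the model returns.
    gate_logits = tuple(torch.randn(num_tokens, num_experts) for _ in range(2))
    aux = load_balancing_loss_func(gate_logits, num_experts=num_experts, top_k=top_k)
    print(f"dispatch check passed, aux loss = {aux:.4f}")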
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch MobileNetV1 model.""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_v1 import MobileNetV1Config logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "MobileNetV1Config" # Base docstring _CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224" _EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _build_tf_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. 
""" tf_to_pt_map = {} if isinstance(model, MobileNetV1ForImageClassification): backbone = model.mobilenet_v1 else: backbone = model prefix = "MobilenetV1/Conv2d_0/" tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var for i in range(13): tf_index = i + 1 pt_index = i * 2 pointer = backbone.layer[pt_index] prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/" tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var pointer = backbone.layer[pt_index + 1] prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/" tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var if isinstance(model, MobileNetV1ForImageClassification): prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/" tf_to_pt_map[prefix + "weights"] = model.classifier.weight tf_to_pt_map[prefix + "biases"] = model.classifier.bias return tf_to_pt_map def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path): """Load TensorFlow checkpoints in a PyTorch model.""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_checkpoint_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_checkpoint_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise") array = np.transpose(array, (2, 3, 0, 1)) elif "weights" in name: logger.info("Transposing") if len(pointer.shape) == 2: # copying into linear layer array = array.squeeze().transpose() else: array = np.transpose(array, (3, 2, 0, 1)) if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {name} {array.shape}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/RMSProp", None) tf_weights.pop(name + "/RMSProp_1", None) tf_weights.pop(name + "/ExponentialMovingAverage", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor: """ Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at: https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2 """ in_height, in_width = features.shape[-2:] stride_height, stride_width = conv_layer.stride kernel_height, kernel_width = conv_layer.kernel_size if in_height % stride_height == 0: pad_along_height = max(kernel_height - stride_height, 0) else: pad_along_height = max(kernel_height - (in_height % stride_height), 0) if in_width % stride_width == 0: pad_along_width = max(kernel_width - stride_width, 0) else: pad_along_width = max(kernel_width - (in_width % stride_width), 0) pad_left = pad_along_width // 2 pad_right = pad_along_width - pad_left pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top padding = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(features, padding, "constant", 0.0) class MobileNetV1ConvLayer(nn.Module): def __init__( self, config: MobileNetV1Config, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True, ) -> None: super().__init__() self.config = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): 
self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: if self.config.tf_padding: features = apply_tf_padding(features, self.convolution) features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features class MobileNetV1PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileNetV1Config load_tf_weights = load_tf_weights_in_mobilenet_v1 base_model_prefix = "mobilenet_v1" main_input_name = "pixel_values" supports_gradient_checkpointing = False def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm2d): module.bias.data.zero_() module.weight.data.fill_(1.0) MOBILENET_V1_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILENET_V1_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", MOBILENET_V1_START_DOCSTRING, ) class MobileNetV1Model(MobileNetV1PreTrainedModel): def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True): super().__init__(config) self.config = config depth = 32 out_channels = max(int(depth * config.depth_multiplier), config.min_depth) self.conv_stem = MobileNetV1ConvLayer( config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, ) strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] self.layer = nn.ModuleList() for i in range(13): in_channels = out_channels if strides[i] == 2 or i == 0: depth *= 2 out_channels = max(int(depth * config.depth_multiplier), config.min_depth) self.layer.append( MobileNetV1ConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ) ) self.layer.append( MobileNetV1ConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ) ) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): raise NotImplementedError @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.conv_stem(pixel_values) all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) last_hidden_state = hidden_states if self.pooler is not None: pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1) else: pooled_output = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, ) @add_start_docstrings( """ MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""", MOBILENET_V1_START_DOCSTRING, ) class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel): def __init__(self, config: MobileNetV1Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v1 = MobileNetV1Model(config) last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MobileViTV2 checkpoints from the ml-cvnets library.""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTV2Config, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_orig_config_file(orig_cfg_file): print("Loading config file...") def flatten_yaml_as_dict(d, parent_key="", sep="."): items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) config = argparse.Namespace() with open(orig_cfg_file, "r") as yaml_file: try: cfg = yaml.load(yaml_file, Loader=yaml.FullLoader) flat_cfg = flatten_yaml_as_dict(cfg) for k, v in flat_cfg.items(): setattr(config, k, v) except yaml.YAMLError as exc: logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc))) return config def get_mobilevitv2_config(task_name, orig_cfg_file): config = MobileViTV2Config() is_segmentation_model = False # dataset if task_name.startswith("imagenet1k_"): config.num_labels = 1000 if int(task_name.strip().split("_")[-1]) == 384: config.image_size = 384 else: config.image_size = 256 filename = "imagenet-1k-id2label.json" elif task_name.startswith("imagenet21k_to_1k_"): config.num_labels = 21000 if int(task_name.strip().split("_")[-1]) == 384: config.image_size = 384 else: config.image_size = 256 filename = "imagenet-22k-id2label.json" elif task_name.startswith("ade20k_"): config.num_labels = 151 config.image_size = 512 filename = "ade20k-id2label.json" is_segmentation_model = True elif task_name.startswith("voc_"): config.num_labels = 21 config.image_size = 512 filename = "pascal-voc-id2label.json" is_segmentation_model = True # orig_config orig_config = load_orig_config_file(orig_cfg_file) assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model" config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0) assert ( getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish") # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16) if "_deeplabv3" in task_name: config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36]) config.aspp_out_channels = 
getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512) config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1) # id2label repo_id = "huggingface/label-files" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def create_rename_keys(state_dict, base_model=False): if base_model: model_prefix = "" else: model_prefix = "mobilevitv2." rename_keys = [] for k in state_dict.keys(): if k[:8] == "encoder.": k_new = k[8:] else: k_new = k if ".block." in k: k_new = k_new.replace(".block.", ".") if ".conv." in k: k_new = k_new.replace(".conv.", ".convolution.") if ".norm." in k: k_new = k_new.replace(".norm.", ".normalization.") if "conv_1." in k: k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.") for i in [1, 2]: if f"layer_{i}." in k: k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.") if ".exp_1x1." in k: k_new = k_new.replace(".exp_1x1.", ".expand_1x1.") if ".red_1x1." in k: k_new = k_new.replace(".red_1x1.", ".reduce_1x1.") for i in [3, 4, 5]: if f"layer_{i}.0." in k: k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.") if f"layer_{i}.1.local_rep.0." in k: k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.") if f"layer_{i}.1.local_rep.1." in k: k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.") for i in [3, 4, 5]: if i == 3: j_in = [0, 1] elif i == 4: j_in = [0, 1, 2, 3] elif i == 5: j_in = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: k_new = k_new.replace( f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j+1}." in k: k_new = k_new.replace( f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.") if "pre_norm_attn.0." in k: k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.") if "pre_norm_attn.1." in k: k_new = k_new.replace("pre_norm_attn.1.", "attention.") if "pre_norm_ffn.0." in k: k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.") if "pre_norm_ffn.1." in k: k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.") if "pre_norm_ffn.3." in k: k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.") if "classifier.1." in k: k_new = k_new.replace("classifier.1.", "classifier.") if "seg_head." in k: k_new = k_new.replace("seg_head.", "segmentation_head.") if ".aspp_layer." in k: k_new = k_new.replace(".aspp_layer.", ".") if ".aspp_pool." 
in k: k_new = k_new.replace(".aspp_pool.", ".") rename_keys.append((k, k_new)) return rename_keys def remove_unused_keys(state_dict): """remove unused keys (e.g.: seg_head.aux_head)""" keys_to_ignore = [] for k in state_dict.keys(): if k.startswith("seg_head.aux_head."): keys_to_ignore.append(k) for k in keys_to_ignore: state_dict.pop(k, None) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our MobileViTV2 structure. """ config = get_mobilevitv2_config(task_name, orig_config_path) # load original state_dict checkpoint = torch.load(checkpoint_path, map_location="cpu") # load huggingface model if task_name.startswith("ade20k_") or task_name.startswith("voc_"): model = MobileViTV2ForSemanticSegmentation(config).eval() base_model = False else: model = MobileViTV2ForImageClassification(config).eval() base_model = False # remove and rename some keys of load the original model state_dict = checkpoint remove_unused_keys(state_dict) rename_keys = create_rename_keys(state_dict, base_model=base_model) for rename_key_src, rename_key_dest in rename_keys: rename_key(state_dict, rename_key_src, rename_key_dest) # load modified state_dict model.load_state_dict(state_dict) # Check outputs on an image, prepared by MobileViTImageProcessor image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32) encoding = image_processor(images=prepare_img(), return_tensors="pt") outputs = model(**encoding) # verify classification model if task_name.startswith("imagenet"): logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0: # expected_logits for base variant expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]) assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {task_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
" """ Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 """ ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_mobilevitv2_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
# coding=utf-8 # Copyright 2020, The T5 Authors and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ mT5 model configuration""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeq2SeqConfigWithPast from ...utils import logging logger = logging.get_logger(__name__) class MT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 [google/mt5-small](https://huggingface.co/google/mt5-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 250112): Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. In the conventional context, it is typically expected that `d_kv` has to be equal to `d_model // num_heads`. But in the architecture of mt5-small, `d_kv` is not equal to `d_model //num_heads`. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `T5Block`. num_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "mt5" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, classifier_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.classifier_dropout = classifier_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache act_info = self.feed_forward_proj.split("-") self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == "gated" if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. " "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": self.dense_act_fn = "gelu_new" super().__init__( is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, ) class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence" common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def default_onnx_opset(self) -> int: return 13 @property def atol_for_validation(self) -> float: return 5e-4
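# A short sketch of how `MT5Config.__init__` above parses `feed_forward_proj` into
# `is_gated_act` and `dense_act_fn`; the expected values follow directly from that logic.
from transformers import MT5Config

config = MT5Config()          # default feed_forward_proj="gated-gelu"
print(config.is_gated_act)    # True -- the "gated-" prefix is detected
print(config.dense_act_fn)    # "gelu_new" -- backwards-compatibility remap of "gated-gelu"

config = MT5Config(feed_forward_proj="relu")
print(config.is_gated_act)    # False
print(config.dense_act_fn)    # "relu"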
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Neighborhood Attention Transformer model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends, ) from ...utils.backbone_utils import BackboneMixin from .configuration_nat import NatConfig if is_natten_available(): from natten.functional import natten2dav, natten2dqkrpb else: def natten2dqkrpb(*args, **kwargs): raise OptionalDependencyNotAvailable() def natten2dav(*args, **kwargs): raise OptionalDependencyNotAvailable() logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "NatConfig" # Base docstring _CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224" _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" NAT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "shi-labs/nat-mini-in1k-224", # See all Nat models at https://huggingface.co/models?filter=nat ] # drop_path and NatDropPath are from the timm library. @dataclass class NatEncoderOutput(ModelOutput): """ Nat encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class NatModelOutput(ModelOutput): """ Nat model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class NatImageClassifierOutput(ModelOutput): """ Nat outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None class NatEmbeddings(nn.Module): """ Construct the patch and position embeddings. """ def __init__(self, config): super().__init__() self.patch_embeddings = NatPatchEmbeddings(config) self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]: embeddings = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NatPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() patch_size = config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim self.num_channels = num_channels if patch_size == 4: pass else: # TODO: Support arbitrary patch sizes. raise ValueError("Dinat only supports patch size of 4 at the moment.") self.projection = nn.Sequential( nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), ) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values) embeddings = embeddings.permute(0, 2, 3, 1) return embeddings class NatDownsampler(nn.Module): """ Convolutional Downsampling Layer. Args: dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. 
""" def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Nat class NatDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.kernel_size = kernel_size # rpb is learnable relative positional biases; same concept is used Swin. self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1))) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 3, 1, 2, 4) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Apply the scale factor before computing attention weights. 
It's usually more efficient because # attention weights are typically a bigger tensor compared to query. # It gives identical results because scalars are commutable in matrix multiplication. query_layer = query_layer / math.sqrt(self.attention_head_size) # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases. attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class NeighborhoodAttentionOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class NatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class NatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, 
hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size) self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = NatIntermediate(config, dim) self.output = NatOutput(config, dim) self.layer_scale_parameters = ( nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None ) def maybe_pad(self, hidden_states, height, width): window_size = self.kernel_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) # pad hidden_states if they are smaller than kernel size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() if self.layer_scale_parameters is not None: attention_output = self.layer_scale_parameters[0] * attention_output hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.output(self.intermediate(layer_output)) if self.layer_scale_parameters is not None: layer_output = self.layer_scale_parameters[1] * layer_output layer_output = hidden_states + self.drop_path(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList( [ NatLayer( config=config, dim=dim, num_heads=num_heads, drop_path_rate=drop_path_rate[i], ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: _, height, width, _ = hidden_states.size() for i, layer_module in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: hidden_states = self.downsample(hidden_states_before_downsampling) stage_outputs = 
(hidden_states, hidden_states_before_downsampling) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class NatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.levels = nn.ModuleList( [ NatStage( config=config, dim=int(config.embed_dim * 2**i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None, ) for i_layer in range(self.num_levels) ] ) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, NatEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] if output_hidden_states and output_hidden_states_before_downsampling: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return NatEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) class NatPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = NatConfig base_model_prefix = "nat" main_input_name = "pixel_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) NAT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. 
Parameters: config ([`NatConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ NAT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.", NAT_START_DOCSTRING, ) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ["natten"]) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return NatModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @add_start_docstrings( """ Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. """, NAT_START_DOCSTRING, ) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ["natten"]) self.num_labels = config.num_labels self.nat = NatModel(config) # Classifier head self.classifier = ( nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nat( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return NatImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @add_start_docstrings( "NAT backbone, to be used with frameworks like DETR and MaskFormer.", NAT_START_DOCSTRING, ) class NatBackbone(NatPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) requires_backends(self, ["natten"]) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] # Add layer norms to hidden states of out_features hidden_states_norms = {} for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224") >>> model = AutoBackbone.from_pretrained( ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"] ... 
) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 512, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=True, ) hidden_states = outputs.reshaped_hidden_states feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: # TODO can we simplify this? batch_size, num_channels, height, width = hidden_state.shape hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
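# --- Editor's usage sketch (not part of the original module) ---
# A minimal end-to-end pass through NatForImageClassification. It reuses the
# checkpoint and image from the NatBackbone docstring above; the `natten`
# backend must be installed, and the predicted label depends on the checkpoint.
if __name__ == "__main__":
    import requests
    import torch
    from PIL import Image

    from transformers import AutoImageProcessor, NatForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
    model = NatForImageClassification.from_pretrained("shi-labs/nat-mini-in1k-224")

    with torch.no_grad():
        logits = model(**processor(image, return_tensors="pt")).logits
    # highest-scoring ImageNet class for the input image
    print(model.config.id2label[int(logits.argmax(-1))])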
transformers/src/transformers/models/nat/modeling_nat.py/0
{ "file_path": "transformers/src/transformers/models/nat/modeling_nat.py", "repo_id": "transformers", "token_count": 16471 }
334
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization classes for OpenAI GPT.""" from typing import Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_openai import OpenAIGPTTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/vocab.json"}, "merges_file": {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/merges.txt"}, "tokenizer_file": {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/tokenizer.json"}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "openai-gpt": 512, } class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with the following peculiarities: - lower case all inputs - uses BERT's BasicTokenizer for pre-BPE tokenization This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = OpenAIGPTTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs) @property def do_lower_case(self): return True def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
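# --- Editor's usage sketch (not part of the original module) ---
# Illustrates the lower-casing peculiarity documented in the class docstring,
# assuming the public "openai-gpt" checkpoint from PRETRAINED_VOCAB_FILES_MAP.
if __name__ == "__main__":
    tokenizer = OpenAIGPTTokenizerFast.from_pretrained("openai-gpt")
    encoding = tokenizer("Hello WORLD")
    # inputs are lower-cased before BPE, so decoding gives "hello world"
    print(encoding.input_ids)
    print(tokenizer.decode(encoding.input_ids))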
transformers/src/transformers/models/openai/tokenization_openai_fast.py/0
{ "file_path": "transformers/src/transformers/models/openai/tokenization_openai_fast.py", "repo_id": "transformers", "token_count": 1098 }
335
# coding=utf-8 # Copyright 2020 Google and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/pegasus-xsum": 512, } logger = logging.get_logger(__name__) # TODO ArthurZ refactor this to only use the added_tokens_encoder class PegasusTokenizer(PreTrainedTokenizer): r""" Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. mask_token (`str`, *optional*, defaults to `"<mask_2>"`): The token used for masking single token values. This is the token used when training this model with masked language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining. It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf). mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`): The token used for masking whole target sentences. This is the token used when training this model with gap sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf). additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. 
If no additional_special_tokens are provided, `<mask_2>` and `<unk_2>, ..., <unk_102>` are used as
            additional special tokens corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    "additional_special_tokens should be of type list, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )

            # fill additional tokens with <unk_...> tokens up to <unk_102>, in case not all of them are provided
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
) additional_special_tokens = additional_special_tokens_extended else: additional_special_tokens_extended = [] additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)] self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.mask_token_sent = mask_token_sent self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) _added_tokens_decoder = { 0: AddedToken(str(pad_token), special=True), 1: AddedToken(str(eos_token), special=True), } if self.mask_token_sent is not None: _added_tokens_decoder[2] = AddedToken(mask_token_sent, special=True) _added_tokens_decoder[3] = AddedToken(str(mask_token), special=True) for i in range(2, self.offset): _added_tokens_decoder[len(_added_tokens_decoder)] = AddedToken(f"<unk_{i}>", special=True) # Force update as we want to make sure vocab is enforced (same as fast) self._added_tokens_decoder = kwargs.pop("added_tokens_decoder", {}) self._added_tokens_decoder.update(_added_tokens_decoder) super().__init__( eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def vocab_size(self) -> int: return len(self.sp_model) + self.offset def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) to an id using the vocab.""" sp_id = self.sp_model.piece_to_id(token) return sp_id + self.offset def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) to a token (str) using the vocab.""" if index < self.offset: return self.sp_model.IdToPiece(index) token = self.sp_model.IdToPiece(index - self.offset) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def num_special_tokens_to_add(self, pair=False): """Just EOS""" return 1 def _special_token_mask(self, seq): all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """Get list where entries are 
[1] if a token is [eos] or [pad] else 0.""" if already_has_special_tokens: return self._special_token_mask(token_ids_0) elif token_ids_1 is None: return self._special_token_mask(token_ids_0) + [1] else: return self._special_token_mask(token_ids_0 + token_ids_1) + [1] def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence: - single sequence: `X </s>` - pair of sequences: `A B </s>` (not intended use) BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,)
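# --- Editor's usage sketch (not part of the original module) ---
# Shows the single-EOS format built by build_inputs_with_special_tokens and the
# ids of the two mask tokens, assuming the public "google/pegasus-xsum"
# checkpoint; exact subword ids depend on the SentencePiece model.
if __name__ == "__main__":
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tokenizer("PEGASUS is pretrained with gap sentence generation.").input_ids
    print(ids[-1] == tokenizer.eos_token_id)  # True: only </s> is appended
    # <mask_1> (GSG) and <mask_2> (MLM) map to ids 2 and 3 in the default vocab
    print(tokenizer.convert_tokens_to_ids("<mask_1>"), tokenizer.mask_token_id)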
transformers/src/transformers/models/pegasus/tokenization_pegasus.py/0
{ "file_path": "transformers/src/transformers/models/pegasus/tokenization_pegasus.py", "repo_id": "transformers", "token_count": 5716 }
336
# coding=utf-8 # Copyright 2022, UCLA NLP, The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "uclanlp/plbart-base": "https://huggingface.co/uclanlp/plbart-base/resolve/main/sentencepiece.bpe.model", "uclanlp/plbart-c-cpp-defect-detection": ( "https://huggingface.co/uclanlp/plbart-c-cpp-defect-detection/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-cs-java": "https://huggingface.co/uclanlp/plbart-cs-java/resolve/main/sentencepiece.bpe.model", "uclanlp/plbart-en_XX-java": ( "https://huggingface.co/uclanlp/plbart-en_XX-java/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-go-en_XX": ( "https://huggingface.co/uclanlp/plbart-go-en_XX/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-java-clone-detection": ( "https://huggingface.co/uclanlp/plbart-java-clone-detection/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-java-cs": "https://huggingface.co/uclanlp/plbart-java-cs/resolve/main/sentencepiece.bpe.model", "uclanlp/plbart-java-en_XX": ( "https://huggingface.co/uclanlp/plbart-java-en_XX/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-javascript-en_XX": ( "https://huggingface.co/uclanlp/plbart-javascript-en_XX/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-php-en_XX": ( "https://huggingface.co/uclanlp/plbart-php-en_XX/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-python-en_XX": ( "https://huggingface.co/uclanlp/plbart-python-en_XX/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-refine-java-medium": ( "https://huggingface.co/uclanlp/plbart-refine-java-medium/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-refine-java-small": ( "https://huggingface.co/uclanlp/plbart-refine-java-small/resolve/main/sentencepiece.bpe.model" ), "uclanlp/plbart-ruby-en_XX": ( "https://huggingface.co/uclanlp/plbart-ruby-en_XX/resolve/main/sentencepiece.bpe.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "uclanlp/plbart-base": 1024, "uclanlp/plbart-c-cpp-defect-detection": 1024, "uclanlp/plbart-cs-java": 1024, "uclanlp/plbart-en_XX-java": 1024, "uclanlp/plbart-go-en_XX": 1024, "uclanlp/plbart-java-clone-detection": 1024, "uclanlp/plbart-java-cs": 1024, "uclanlp/plbart-java-en_XX": 1024, "uclanlp/plbart-javascript-en_XX": 1024, "uclanlp/plbart-php-en_XX": 1024, "uclanlp/plbart-python-en_XX": 1024, "uclanlp/plbart-refine-java-medium": 1024, "uclanlp/plbart-refine-java-small": 1024, "uclanlp/plbart-ruby-en_XX": 1024, } 
FAIRSEQ_LANGUAGE_CODES = {
    "base": ["__java__", "__python__", "__en_XX__"],
    "multi": ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
}

FAIRSEQ_LANGUAGE_CODES_MAP = {
    "java": "__java__",
    "python": "__python__",
    "en_XX": "__en_XX__",
    "javascript": "__javascript__",
    "php": "__php__",
    "ruby": "__ruby__",
    "go": "__go__",
}


class PLBartTokenizer(PreTrainedTokenizer):
    """
    Construct a PLBART tokenizer.

    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        src_lang (`str`, *optional*):
            A string representing the source language.
        tgt_lang (`str`, *optional*):
            A string representing the target language.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The start of sequence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The cls token, which is a special token used as the first token for all tasks.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masking tasks. This
            is only used in the `"base"` tokenizer type. For the `"multi"` tokenizer, masking is never done for the
            downstream tasks.
        language_codes (`str`, *optional*, defaults to `"base"`):
            What language codes to use. Should be one of `"base"` or `"multi"`.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Examples:

    ```python
    >>> from transformers import PLBartTokenizer

    >>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
    >>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
    >>> expected_translation_english = "Returns the maximum value of a b c."
>>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
    ```"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        language_codes="base",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        src_lang = self._convert_lang_code_special_format(src_lang)
        tgt_lang = self._convert_lang_code_special_format(tgt_lang)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        self.language_codes = language_codes
        fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}

        if self.language_codes == "base":
            self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        _additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        if self.language_codes == "base":
            self._src_lang = src_lang
            self.cur_lang_code_id = (
                self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang
            )
        else:
            self._src_lang = src_lang if src_lang is not None else "__en_XX__"
            self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            language_codes=language_codes,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=_additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        if self.language_codes == "base":
            return (
                len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1
            )  # Plus 1 for the mask token
        else:
            return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        new_src_lang = self._convert_lang_code_special_format(new_src_lang)
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A PLBART sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.
Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = self._convert_lang_code_special_format(src_lang) self.tgt_lang = self._convert_lang_code_special_format(tgt_lang) inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = 
self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "python", **kwargs, ) -> BatchEncoding: self.src_lang = self._convert_lang_code_special_format(src_lang) self.tgt_lang = self._convert_lang_code_special_format(tgt_lang) return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" src_lang = self._convert_lang_code_special_format(src_lang) self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None self.prefix_tokens = [] if self.cur_lang_code is not None: self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.suffix_tokens = [self.eos_token_id] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].""" lang = self._convert_lang_code_special_format(lang) self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None self.prefix_tokens = [] if self.cur_lang_code is not None: self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] else: self.suffix_tokens = [self.eos_token_id] def _convert_lang_code_special_format(self, lang: str) -> str: """Convert Language Codes to format tokenizer uses if required""" lang = FAIRSEQ_LANGUAGE_CODES_MAP[lang] if lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys() else lang return lang
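# --- Editor's usage sketch (not part of the original module) ---
# Demonstrates the suffix-style special tokens documented above: with
# src_lang="python", the encoder input ends in [</s>, __python__]. Assumes the
# public "uclanlp/plbart-python-en_XX" checkpoint from PRETRAINED_VOCAB_FILES_MAP.
if __name__ == "__main__":
    tokenizer = PLBartTokenizer.from_pretrained(
        "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
    )
    inputs = tokenizer("def add(a, b):NEW_LINE_INDENTreturn a + b", return_tensors="pt")
    eos_id, lang_id = inputs.input_ids[0, -2:].tolist()
    print(eos_id == tokenizer.eos_token_id)  # True
    print(tokenizer.convert_ids_to_tokens(lang_id))  # "__python__"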
transformers/src/transformers/models/plbart/tokenization_plbart.py/0
{ "file_path": "transformers/src/transformers/models/plbart/tokenization_plbart.py", "repo_id": "transformers", "token_count": 9661 }
337
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ProphetNet checkpoint."""


import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak ProphetNet's weights to our ProphetNet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)

                # these two checks were previously bare expressions (no-ops); they are meant to be assertions
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
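# --- Editor's note (not part of the original script) ---
# Illustrative command-line invocation; the paths are placeholders, and the
# `transformers_old` package from the branch mentioned above must be importable:
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_folder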
transformers/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3107 }
338
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model Reformer.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/reformer-crime-and-punishment": 524288, } class ReformerTokenizer(PreTrainedTokenizer): """ Construct a Reformer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece) . This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. additional_special_tokens (`List[str]`, *optional*, defaults to `[]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__( eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index < self.sp_model.get_piece_size(): token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,)
transformers/src/transformers/models/reformer/tokenization_reformer.py/0
{ "file_path": "transformers/src/transformers/models/reformer/tokenization_reformer.py", "repo_id": "transformers", "token_count": 3018 }
339
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RoBERTa-PreLayerNorm checkpoint."""


import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak roberta_prelayernorm's weights to our BERT structure.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
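# --- Editor's note (not part of the original script) ---
# Illustrative command-line invocation; the checkpoint repo comes from the
# --help text above, while the output folder is a placeholder:
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path /path/to/output_folder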
transformers/src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1063 }
340
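To make the key-renaming step of the script above concrete, here is the same prefix rewrite and filtering loop applied to a toy state dict; the keys and values are invented for illustration and are not from a real checkpoint.

# Illustration of the prefix-renaming/filtering loop from the script above,
# run on a made-up state_dict (keys are hypothetical).
original_state_dict = {
    "roberta.embeddings.word_embeddings.weight": "kept",
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": "dropped",
}
state_dict = {}
for tensor_key, tensor_value in original_state_dict.items():
    if tensor_key.startswith("roberta."):
        tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
    if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
        continue  # unused weights are dropped
    state_dict[tensor_key] = tensor_value
# state_dict == {"roberta_prelayernorm.embeddings.word_embeddings.weight": "kept"}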
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""

from typing import List

from tokenizers import NormalizedString, PreTokenizedString, normalizers


class JiebaPreTokenizer:
    def __init__(self, vocab) -> None:
        self.vocab = vocab
        self.normalizers = normalizers.BertNormalizer(
            clean_text=False,
            handle_chinese_chars=True,
            strip_accents=False,
            lowercase=False,
        )
        try:
            import rjieba
        except ImportError:
            raise ImportError(
                "You need to install rjieba to use RoFormerTokenizer. "
                "See https://pypi.org/project/rjieba/ for installation."
            )
        self.jieba = rjieba

    def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
        splits = []

        # Slicing `normalized_string` here is slow (~6 s) but preserves the character
        # offsets, so `test_alignement_methods` can pass
        for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
            if token in self.vocab:
                splits.append(normalized_string[start:end])
            else:
                token_list = self.normalizers.normalize_str(token).split()
                for token in token_list:
                    if token:
                        end = start + len(token)
                        splits.append(normalized_string[start:end])
                        start = end

        # The variant below is fast (~300 ms) but loses the offsets, so
        # `test_alignement_methods` can't pass
        # for token in self.jieba.cut(str(normalized_string), False):
        #     if token in self.vocab:
        #         splits.append(NormalizedString(token))
        #     else:
        #         token_list = self.normalizers.normalize_str(token).split()
        #         for token in token_list:
        #             if token:
        #                 splits.append(NormalizedString(token))

        return splits

    def pre_tokenize(self, pretok: PreTokenizedString):
        pretok.split(self.jieba_split)
transformers/src/transformers/models/roformer/tokenization_utils.py/0
{ "file_path": "transformers/src/transformers/models/roformer/tokenization_utils.py", "repo_id": "transformers", "token_count": 1140 }
341
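The class above follows the `tokenizers` custom pre-tokenizer protocol (a `pre_tokenize` method that calls `pretok.split`). Below is a minimal sketch, not from the original file, of wiring it into a `tokenizers` backend; the toy vocabulary and the WordPiece model are placeholders for what a real RoFormer fast tokenizer would supply.

# Hedged sketch: attaching JiebaPreTokenizer to a `tokenizers` backend.
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import PreTokenizer

vocab = {"[UNK]": 0, "今天": 1, "天气": 2}  # toy vocabulary for illustration
backend = Tokenizer(WordPiece(vocab, unk_token="[UNK]"))
backend.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
# Note: a tokenizer carrying a custom (Python-side) pre-tokenizer cannot be
# serialized with the backend's .save(), so the pre-tokenizer is usually
# swapped out before saving and reattached afterwards.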
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch SeamlessM4T model.""" import copy import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...deepspeed import is_deepspeed_zero3_enabled from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Wav2Vec2BaseModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_seamless_m4t import SeamlessM4TConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/hf-seamless-m4t-medium" _CONFIG_FOR_DOC = "SeamlessM4TConfig" SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/hf-seamless-m4t-medium", # See all SeamlessM4T models at https://huggingface.co/models?filter=seamless_m4t ] SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = { "microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json", } @dataclass class SeamlessM4TGenerationOutput(ModelOutput): """ Class defining the generated outputs from [`SeamlessM4TModel`], [`SeamlessM4TForTextToText`], [`SeamlessM4TForTextToSpeech`], [`SeamlessM4TForSpeechToSpeech`] and [`SeamlessM4TForSpeechToText`]. Args: waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): The final audio waveform predicted by the model. waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*): The length in samples of each element in the `waveform` batch. sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The generated translated sequences. This is the output of the text-to-text or the speech-to-text models. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*): The generated translated unit sequences. This is the output of the text-to-units model. The second dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished early due to the `t2u_eos_token_id`. """ waveform: Optional[torch.FloatTensor] = None waveform_lengths: Optional[torch.IntTensor] = None sequences: Optional[Tuple[torch.FloatTensor]] = None unit_sequences: Optional[Tuple[torch.FloatTensor]] = None SEAMLESS_M4T_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`~SeamlessM4TConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SEAMLESS_M4T_INPUTS_DOCSTRING_FIRST_PART = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. """ SEAMLESS_M4T_INPUTS_DOCSTRING_TEXT_PART = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ SEAMLESS_M4T_INPUTS_DOCSTRING_SPEECH_PART = r""" Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. """ SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART = r""" attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" M4T_MODEL_INPUTS_DOCSTRING = SEAMLESS_M4T_INPUTS_DOCSTRING_FIRST_PART + SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART M4T_TEXT_INPUTS_DOCSTRING = SEAMLESS_M4T_INPUTS_DOCSTRING_TEXT_PART + SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART M4T_SPEECH_INPUTS_DOCSTRING = SEAMLESS_M4T_INPUTS_DOCSTRING_SPEECH_PART + SEAMLESS_M4T_INPUTS_DOCSTRING_LAST_PART ############ UTILS ################ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor): """ Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that stops at the corresponding element in `seq_lens`. Args: hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`): The sequences to mask, where `*` is any number of sequence-specific dimensions including none. seq_lens (`torch.Tensor` of shape `(batch)`: Each element represents the length of the sequence at the same index in `hidden_states` Returns: `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)` """ batch_size, mask_seq_len = hidden_states.shape[:2] indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1) bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len) mask = hidden_states.new_ones((batch_size, mask_seq_len)) mask = mask.masked_fill(bool_mask, 0) return mask def format_speech_generation_kwargs(kwargs): """ Format kwargs for SeamlessM4T models that generate speech, attribute kwargs to either the text generation or the speech generation models. Args: kwargs (`dict`)`: Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, except for `decoder_input_ids` which will only be passed through the text components. - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the text model and speech model respectively. It has the priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for one generation but not for the other. 
""" # attribute kwargs to models kwargs_text = {} kwargs_speech = {} for key, value in kwargs.items(): if key.startswith("text_"): key = key[len("text_") :] kwargs_text[key] = value elif key.startswith("speech_"): key = key[len("speech_") :] kwargs_speech[key] = value else: # If the key is already in a specific config, then it's been set with a # submodules specific value and we don't override if key not in kwargs_text: kwargs_text[key] = value if key not in kwargs_speech: kwargs_speech[key] = value return kwargs_text, kwargs_speech ############ SPEECH ENCODER related code ################ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->SeamlessM4TConformer, feat_extract_activation->speech_encoder_hidden_act class SeamlessM4TConformerPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = SeamlessM4TConformerSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.speech_encoder_hidden_act] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRotaryPositionalEmbedding with Wav2Vec2->SeamlessM4T, num_attention_heads->speech_encoder_attention_heads class SeamlessM4TConformerRotaryPositionalEmbedding(nn.Module): """Rotary positional embedding Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf """ def __init__(self, config): super().__init__() dim = config.hidden_size // config.speech_encoder_attention_heads base = config.rotary_embedding_base inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) self.register_buffer("inv_freq", inv_freq) self.cached_sequence_length = None self.cached_rotary_positional_embedding = None def forward(self, hidden_states): sequence_length = hidden_states.shape[1] if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None: return self.cached_rotary_positional_embedding self.cached_sequence_length = sequence_length # Embeddings are computed in the dtype of the inv_freq constant time_stamps = torch.arange(sequence_length).type_as(self.inv_freq) freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq) embeddings = torch.cat((freqs, freqs), dim=-1) cos_embeddings = embeddings.cos()[:, None, None, :] sin_embeddings = embeddings.sin()[:, None, None, :] # Computed embeddings are cast to the dtype of the hidden state inputs self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, 
sin_embeddings]).type_as(hidden_states) return self.cached_rotary_positional_embedding # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRelPositionalEmbedding with Wav2Vec2->SeamlessM4T class SeamlessM4TConformerRelPositionalEmbedding(nn.Module): """Relative positional encoding module.""" def __init__(self, config): super().__init__() self.max_len = config.max_source_positions self.d_model = config.hidden_size self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, self.max_len)) def extend_pe(self, x): # Reset the positional encodings if self.pe is not None: # self.pe contains both positive and negative parts # the length of self.pe is 2 * input_len - 1 if self.pe.size(1) >= x.size(1) * 2 - 1: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return # Suppose `i` is the position of query vector and `j` is the # position of key vector. We use positive relative positions when keys # are to the left (i>j) and negative relative positions otherwise (i<j). pe_positive = torch.zeros(x.size(1), self.d_model) pe_negative = torch.zeros(x.size(1), self.d_model) position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model) ) pe_positive[:, 0::2] = torch.sin(position * div_term) pe_positive[:, 1::2] = torch.cos(position * div_term) pe_negative[:, 0::2] = torch.sin(-1 * position * div_term) pe_negative[:, 1::2] = torch.cos(-1 * position * div_term) # Reverse the order of positive indices and concat both positive and # negative indices. This is used to support the shifting trick # as in https://arxiv.org/abs/1901.02860 pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0) pe_negative = pe_negative[1:].unsqueeze(0) pe = torch.cat([pe_positive, pe_negative], dim=1) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, hidden_states: torch.Tensor): self.extend_pe(hidden_states) start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1 end_idx = self.pe.size(1) // 2 + hidden_states.size(1) relative_position_embeddings = self.pe[:, start_idx:end_idx] return relative_position_embeddings # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSamePadLayer with Wav2Vec2->SeamlessM4T class SeamlessM4TConformerSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states class SeamlessM4TConformerFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps) self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size) self.dropout = nn.Dropout(config.speech_encoder_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class SeamlessM4TConformerFeedForward(nn.Module): def __init__(self, config, act_fn=None, dropout=None): super().__init__() dropout = dropout if dropout is not None else config.speech_encoder_dropout act_fn = act_fn if 
act_fn is not None else config.speech_encoder_hidden_act self.intermediate_dropout = nn.Dropout(dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size) self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class SeamlessM4TConformerConvolutionModule(nn.Module): """Convolution block used in the conformer block""" def __init__(self, config): super().__init__() if (config.conv_depthwise_kernel_size - 1) % 2 == 1: raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding") self.layer_norm = nn.LayerNorm(config.hidden_size) self.pointwise_conv1 = nn.Conv1d( config.hidden_size, 2 * config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.glu = nn.GLU(dim=1) self.depthwise_conv = nn.Conv1d( config.hidden_size, config.hidden_size, config.conv_depthwise_kernel_size, stride=1, padding="same", groups=config.hidden_size, bias=False, ) self.batch_norm = nn.BatchNorm1d(config.hidden_size) self.activation = ACT2FN[config.speech_encoder_hidden_act] self.pointwise_conv2 = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.dropout = nn.Dropout(config.speech_encoder_dropout) def forward(self, hidden_states, attention_mask=None): hidden_states = self.layer_norm(hidden_states) # Ensure that we do not leak padded positions in depthwise convolution. # Put 0 where necessary if attention_mask is not None: hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # exchange the temporal dimension and the feature dimension hidden_states = hidden_states.transpose(1, 2) # GLU mechanism # => (batch, 2*channel, dim) hidden_states = self.pointwise_conv1(hidden_states) # => (batch, channel, dim) hidden_states = self.glu(hidden_states) # 1D Depthwise Conv hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.batch_norm(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.pointwise_conv2(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class SeamlessM4TConformerSelfAttention(nn.Module): """Construct a SeamlessM4TConformerSelfAttention object. Can be enhanced with rotary or relative position embeddings.
""" def __init__(self, config, use_position_embeddings=True): super().__init__() self.head_size = config.hidden_size // config.speech_encoder_attention_heads self.num_heads = config.speech_encoder_attention_heads self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None self.linear_q = nn.Linear(config.hidden_size, config.hidden_size) self.linear_k = nn.Linear(config.hidden_size, config.hidden_size) self.linear_v = nn.Linear(config.hidden_size, config.hidden_size) self.linear_out = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(p=config.speech_encoder_dropout) if self.position_embeddings_type == "relative": # linear transformation for positional encoding self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://arxiv.org/abs/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # self-attention mechanism batch_size, sequence_length, hidden_size = hidden_states.size() # make sure query/key states can be != value states query_key_states = hidden_states value_states = hidden_states if self.position_embeddings_type == "rotary": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'" ) query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings) # project query_key_states and value_states query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size) # => (batch, head, time1, d_k) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) if self.position_embeddings_type == "relative": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type ==" " 'relative'" ) # apply relative_position_embeddings to qk scores # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860 scores = self._apply_relative_embeddings( query=query, key=key, relative_position_embeddings=relative_position_embeddings ) else: scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size) # apply attention_mask if necessary if attention_mask is not None: scores = scores + attention_mask # => (batch, head, time1, time2) probs = torch.softmax(scores, dim=-1) probs = self.dropout(probs) # => (batch, head, time1, d_k) hidden_states = torch.matmul(probs, value) # => (batch, time1, hidden_size) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size) hidden_states = self.linear_out(hidden_states) return hidden_states, probs # Copied from 
transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_rotary_embedding def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings): batch_size, sequence_length, hidden_size = hidden_states.size() hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size) cos = relative_position_embeddings[0, :sequence_length, ...] sin = relative_position_embeddings[1, :sequence_length, ...] # rotate hidden_states with rotary embeddings hidden_states = hidden_states.transpose(0, 1) rotated_states_begin = hidden_states[..., : self.head_size // 2] rotated_states_end = hidden_states[..., self.head_size // 2 :] rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1) hidden_states = (hidden_states * cos) + (rotated_states * sin) hidden_states = hidden_states.transpose(0, 1) hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size) return hidden_states # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_relative_embeddings def _apply_relative_embeddings(self, query, key, relative_position_embeddings): # 1. project positional embeddings # => (batch, head, 2*time1-1, d_k) proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings) proj_relative_position_embeddings = proj_relative_position_embeddings.view( relative_position_embeddings.size(0), -1, self.num_heads, self.head_size ) proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2) proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3) # 2. Add bias to query # => (batch, head, time1, d_k) query = query.transpose(1, 2) q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2) q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2) # 3. attention score: first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # => (batch, head, time1, time2) scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1)) # 4. then compute matrix b and matrix d # => (batch, head, time1, 2*time1-1) scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings) # 5. shift matrix b and matrix d zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype) scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1) scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2]) scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape) scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd) scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1] # 6. 
sum matrices # => (batch, head, time1, time2) scores = (scores_ac + scores_bd) / math.sqrt(self.head_size) return scores class SeamlessM4TConformerEncoderLayer(nn.Module): """Conformer block based on https://arxiv.org/abs/2005.08100.""" # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerEncoderLayer.__init__ with Wav2Vec2->SeamlessM4T, attention_dropout->speech_encoder_dropout, torch.nn->nn def __init__(self, config): super().__init__() embed_dim = config.hidden_size dropout = config.speech_encoder_dropout # Feed-forward 1 self.ffn1_layer_norm = nn.LayerNorm(embed_dim) self.ffn1 = SeamlessM4TConformerFeedForward(config) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim) self.self_attn_dropout = nn.Dropout(dropout) self.self_attn = SeamlessM4TConformerSelfAttention(config) # Conformer Convolution self.conv_module = SeamlessM4TConformerConvolutionModule(config) # Feed-forward 2 self.ffn2_layer_norm = nn.LayerNorm(embed_dim) self.ffn2 = SeamlessM4TConformerFeedForward(config) self.final_layer_norm = nn.LayerNorm(embed_dim) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, conv_attention_mask: Optional[torch.Tensor] = None, ): hidden_states = hidden_states # 1. Feed-Forward 1 layer residual = hidden_states hidden_states = self.ffn1_layer_norm(hidden_states) hidden_states = self.ffn1(hidden_states) hidden_states = hidden_states * 0.5 + residual residual = hidden_states # 2. Self-Attention layer hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weigts = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual # 3. Convolutional Layer residual = hidden_states hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask) hidden_states = residual + hidden_states # 4. 
Feed-Forward 2 Layer residual = hidden_states hidden_states = self.ffn2_layer_norm(hidden_states) hidden_states = self.ffn2(hidden_states) hidden_states = hidden_states * 0.5 + residual hidden_states = self.final_layer_norm(hidden_states) return hidden_states, attn_weigts class SeamlessM4TConformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.position_embeddings_type == "relative": self.embed_positions = SeamlessM4TConformerRelPositionalEmbedding(config) elif config.position_embeddings_type == "rotary": self.embed_positions = SeamlessM4TConformerRotaryPositionalEmbedding(config) else: self.embed_positions = None self.dropout = nn.Dropout(config.speech_encoder_dropout) self.layers = nn.ModuleList( [SeamlessM4TConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)] ) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None conv_attention_mask = attention_mask if attention_mask is not None: # make sure padded tokens output 0 hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) hidden_states = self.dropout(hidden_states) if self.embed_positions is not None: relative_position_embeddings = self.embed_positions(hidden_states) else: relative_position_embeddings = None deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = ( True if self.training and (dropout_probability < self.config.speech_encoder_layerdrop) else False ) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, relative_position_embeddings, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class SeamlessM4TConformerAdapterLayer(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.hidden_size dropout = config.adaptor_dropout self.kernel_size = config.adaptor_kernel_size self.stride = 
config.adaptor_stride # 1. residual convolution self.residual_layer_norm = nn.LayerNorm(embed_dim) self.residual_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.activation = nn.GLU(dim=1) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim) self.self_attn_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.self_attn = SeamlessM4TConformerSelfAttention(config, use_position_embeddings=False) self.self_attn_dropout = nn.Dropout(dropout) # Feed-forward self.ffn_layer_norm = nn.LayerNorm(embed_dim) self.ffn = SeamlessM4TConformerFeedForward(config, act_fn="relu", dropout=dropout) def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask): pad = self.kernel_size // 2 seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1) seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1 return seq_lens.floor() def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): residual = self.residual_layer_norm(hidden_states) # Apply pooling to the residual to match the sequence length of the # multi-head attention output. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) residual = residual.transpose(1, 2) residual = self.residual_conv(residual) residual = self.activation(residual) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) residual = residual.transpose(1, 2) hidden_states = self.self_attn_layer_norm(hidden_states) # Apply pooling before feeding to the multihead-attention layer. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.self_attn_conv(hidden_states) hidden_states = self.activation(hidden_states) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) hidden_states = hidden_states.transpose(1, 2) if attention_mask is not None: sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( hidden_states.device ) attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths) attention_mask = _prepare_4d_attention_mask( attention_mask, hidden_states.dtype, ) # The rest of the computation is identical to a vanilla Transformer # encoder layer. 
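        # Shape walkthrough for the pooling above (illustrative numbers, not from
        # the original code): with kernel_size=8, stride=8 and padding=4, an input
        # of length 100 is shrunk by both strided convolutions to
        # ((100 + 2*4 - 8) / 8 + 1) = 13.5, floored to 13, matching
        # _compute_sub_sample_lengths_from_attention_mask; this is why the
        # attention mask is recomputed from the sub-sampled lengths above instead
        # of reusing the original `attention_mask`.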
hidden_states, attn_weigths = self.self_attn( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.ffn_layer_norm(hidden_states) hidden_states = self.ffn(hidden_states) + residual return hidden_states class SeamlessM4TConformerAdapter(nn.Module): def __init__(self, config): super().__init__() self.layers = nn.ModuleList(SeamlessM4TConformerAdapterLayer(config) for _ in range(config.num_adapter_layers)) def forward(self, hidden_states, attention_mask): # down project hidden_states if necessary for layer in self.layers: hidden_states = layer(hidden_states, attention_mask) return hidden_states ############ TEXT / UNITS related code ################ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding class SeamlessM4TSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer("weights", emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward( self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0 ): if input_ids is not None: bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) else: bsz, seq_len = inputs_embeds.size()[:-1] position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): """ We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length class SeamlessM4TAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" # Copied from transformers.models.bart.modeling_bart.BartAttention.__init__ with Bart->SeamlessM4T def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[SeamlessM4TConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if encoder_hidden_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = encoder_hidden_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == encoder_hidden_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `encoder_hidden_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == encoder_hidden_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz) value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
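        # Shape walkthrough (illustrative numbers): with bsz=2, num_heads=16,
        # tgt_len=10 and head_dim=64, `attn_output` is (2, 10, 16, 64) after the
        # transpose above and (2, 10, 1024) after the reshape below, i.e.
        # (bsz, tgt_len, embed_dim) with embed_dim = num_heads * head_dim.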
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDenseActDense with NllbMoe->SeamlessM4T,DenseActDense->FeedForwardNetwork, d_model->hidden_size class SeamlessM4TFeedForwardNetwork(nn.Module): def __init__(self, config: SeamlessM4TConfig, ffn_dim: int): super().__init__() self.fc1 = nn.Linear(config.hidden_size, ffn_dim) self.fc2 = nn.Linear(ffn_dim, config.hidden_size) self.dropout = nn.Dropout(config.activation_dropout) self.act = ACT2FN[config.activation_function] def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.fc2.weight, torch.Tensor) and hidden_states.dtype != self.fc2.weight.dtype and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8) ): hidden_states = hidden_states.to(self.fc2.weight.dtype) hidden_states = self.fc2(hidden_states) return hidden_states class SeamlessM4TEncoderLayer(nn.Module): def __init__(self, config: SeamlessM4TConfig, encoder_ffn_dim=None, encoder_attention_heads=None): super().__init__() encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim encoder_attention_heads = ( config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads ) self.embed_dim = config.hidden_size self.self_attn = SeamlessM4TAttention( embed_dim=self.embed_dim, num_heads=encoder_attention_heads, dropout=config.attention_dropout, ) self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=encoder_ffn_dim) self.ffn_layer_norm = nn.LayerNorm(config.hidden_size) self.ffn_dropout = nn.Dropout(config.activation_dropout) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ffn_layer_norm(hidden_states) hidden_states = self.ffn(hidden_states) hidden_states = self.ffn_dropout(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class SeamlessM4TDecoderLayer(nn.Module): def __init__(self, config: SeamlessM4TConfig, decoder_ffn_dim=None, decoder_attention_heads=None): super().__init__() decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim decoder_attention_heads = ( config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads ) self.embed_dim = config.hidden_size self.self_attn = SeamlessM4TAttention( embed_dim=self.embed_dim, num_heads=decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.cross_attention = SeamlessM4TAttention( self.embed_dim, decoder_attention_heads, config.attention_dropout, is_decoder=True ) self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim) self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=decoder_ffn_dim) self.ffn_layer_norm = nn.LayerNorm(config.hidden_size) self.ffn_dropout = nn.Dropout(config.activation_dropout) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.cross_attention_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_value=cross_attn_past_key_value, attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value += cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.ffn_layer_norm(hidden_states) hidden_states = self.ffn(hidden_states) hidden_states = self.ffn_dropout(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states, present_key_value) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs ############ SUB-MODELS related code ################ class SeamlessM4TPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SeamlessM4TConfig base_model_prefix = "seamless_m4t" supports_gradient_checkpointing = True _no_split_modules = ["SeamlessM4TEncoderLayer", "SeamlessM4TDecoderLayer", "SeamlessM4TConformerEncoderLayer"] def _init_weights(self, module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, SeamlessM4TConformerSelfAttention): if hasattr(module, "pos_bias_u"): nn.init.xavier_uniform_(module.pos_bias_u) if hasattr(module, "pos_bias_v"): nn.init.xavier_uniform_(module.pos_bias_v) elif isinstance(module, SeamlessM4TConformerPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, SeamlessM4TConformerFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask): kernel_size, stride = self.config.adaptor_kernel_size, self.config.adaptor_stride pad = kernel_size // 2 seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1) seq_lens = ((seq_lens + 2 * pad - kernel_size) / stride) + 1 return seq_lens.floor() def compute_last_hidden_states_per_sample( self, hidden_states: Tuple[Tuple[torch.Tensor]], beam_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Computes the last hidden states. Parameters: hidden_states (`Tuple[Tuple[torch.Tensor]]`): The generated hidden states. Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of torch.FloatTensor of shape (batch_size*num_beams*num_return_sequences, generated_length, hidden_size). beam_indices (`torch.LongTensor`, *optional*): Beam indices of generated token id at each generation step. `torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at generate-time. Return: `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length, hidden_size)` containing the last hidden states. ```""" # 1. First, let's compute last_hidden_states from hidden_states. # For each generation step, takes the hidden state from the last layer. # shape: (batch_size*vocab_size*num_return_sequences, # generation_steps, hidden_dim) last_hidden_states = torch.concat([hidden_states[-1] for hidden_states in hidden_states], dim=1) # 2. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent # to a beam search approach were the first (and only) beam is always selected # in that case, return directly last_hidden_states if beam_indices is None: return last_hidden_states # 3. 
cut beam_indices to longest beam length beam_indices_mask = beam_indices < 0 max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max() beam_indices = beam_indices.clone()[:, :max_beam_length] beam_indices_mask = beam_indices_mask[:, :max_beam_length] # 4. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards anyways beam_indices[beam_indices_mask] = 0 # 5. expand beam_indices to last_hidden_states dim beam_indices = beam_indices.unsqueeze(-1) beam_indices = beam_indices.expand(-1, -1, last_hidden_states.shape[-1]) # 6. select the right candidate for each beam # in other words, new_last_hidden_states[i,j,k] = last_hidden_states[beam_indices[i,j,k], j, k] for all i, j, k last_hidden_states = torch.gather(last_hidden_states, 0, beam_indices) return last_hidden_states @add_start_docstrings( """Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers. Each layer is a [`SeamlessM4TConformerEncoderLayer`].""", SEAMLESS_M4T_START_DOCSTRING, ) class SeamlessM4TSpeechEncoder(SeamlessM4TPreTrainedModel): main_input_name = "input_features" def __init__(self, config: SeamlessM4TConfig): super().__init__(config) self.feature_projection = SeamlessM4TConformerFeatureProjection(config) self.encoder = SeamlessM4TConformerEncoder(config) self.intermediate_ffn = SeamlessM4TConformerFeedForward(config, act_fn="relu", dropout=0.0) self.adapter = SeamlessM4TConformerAdapter(config) if config.add_adapter else None self.inner_layer_norm = nn.LayerNorm(config.hidden_size) # Initialize weights and apply final processing self.post_init() def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_features is None: raise ValueError( """Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4TSpeechEncoder.forward`. Make sure one of them is not `None`.""" ) hidden_states = self.feature_projection(input_features) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] expanded_hidden_states = self.intermediate_ffn(hidden_states) hidden_states = hidden_states + 0.5 * expanded_hidden_states if self.adapter is not None: hidden_states = self.adapter(hidden_states, attention_mask=attention_mask) hidden_states = self.inner_layer_norm(hidden_states) if not return_dict: return (hidden_states,) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # inspired from MBart and NllbMoe @add_start_docstrings( "Transformer encoder consisting of *config.encoder_layers* self attention layers. 
Each layer is a [`SeamlessM4TEncoderLayer`].", SEAMLESS_M4T_START_DOCSTRING, """ embed_tokens (`nn.Embedding`, *optional*): Input embedding is_t2u_encoder (`bool`, *optional*, defaults to `False`): indicates if it belongs to the text-to-units model, in which case it won't have input embeddings """, ) class SeamlessM4TEncoder(SeamlessM4TPreTrainedModel): def __init__( self, config: SeamlessM4TConfig, embed_tokens: Optional[nn.Embedding] = None, is_t2u_encoder: bool = False, ): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.padding_idx = config.pad_token_id embed_dim = config.hidden_size self.is_t2u_encoder = is_t2u_encoder self.max_source_positions = config.max_position_embeddings if not self.is_t2u_encoder: self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = SeamlessM4TSinusoidalPositionalEmbedding( self.max_source_positions, embed_dim, self.padding_idx, ) layers = [] for _ in range(config.encoder_layers): layers.append( SeamlessM4TEncoderLayer( config, encoder_attention_heads=config.encoder_attention_heads, encoder_ffn_dim=config.encoder_ffn_dim, ) ) self.layers = nn.ModuleList(layers) self.layer_norm = nn.LayerNorm(config.hidden_size) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and self.is_t2u_encoder: raise ValueError( "You cannot pass input_ids to the encoder of the text_to_units model. Pass inputs_embeds instead." ) # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if not self.is_t2u_encoder: embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device) else: hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.forward, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @add_start_docstrings( "Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`SeamlessM4TDecoderLayer`].", SEAMLESS_M4T_START_DOCSTRING, """ embed_tokens (`nn.Embedding`, *optional*): Input embedding """, ) class SeamlessM4TDecoder(SeamlessM4TPreTrainedModel): def __init__( self, config: SeamlessM4TConfig, embed_tokens: Optional[nn.Embedding] = None, ): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 if embed_tokens is not None: # if embed_tokens defined, use its shape instead self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx) self.embed_tokens.weight = embed_tokens.weight else: self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx) self.embed_positions = SeamlessM4TSinusoidalPositionalEmbedding( self.max_target_positions, config.hidden_size, padding_idx=self.padding_idx, ) layers = [] for _ in range(config.decoder_layers): layers.append( SeamlessM4TDecoderLayer( config, decoder_attention_heads=config.decoder_attention_heads, decoder_ffn_dim=config.decoder_ffn_dim, ) ) self.layers = nn.ModuleList(layers) self.layer_norm = nn.LayerNorm(config.hidden_size) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + positions.to(inputs_embeds.device) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if 
self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[1],) if output_attentions: all_self_attns += (layer_outputs[2],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[3],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "Transformer bare text-to-unit encoder-decoder. The encoder is a [`SeamlessM4TEncoder`] without embeddings and the decoder is a [`SeamlessM4TDecoder`].", SEAMLESS_M4T_START_DOCSTRING, """ embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder.
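Because the encoder is built with `is_t2u_encoder=True`, it has no token embeddings: passing `input_ids` raises a `ValueError`, and callers must provide `inputs_embeds` (in practice, hidden states produced by the text decoder). A rough sketch of the calling convention, with made-up dimensions:

```python
import torch

batch, seq_len, hidden_size = 2, 7, 1024  # illustrative dimensions only
decoder_hidden_states = torch.randn(batch, seq_len, hidden_size)
# t2u_model.encoder(input_ids=token_ids)                  # would raise ValueError
# t2u_model.encoder(inputs_embeds=decoder_hidden_states)  # expected usage
```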
""", ) class SeamlessM4TTextToUnitModel(SeamlessM4TPreTrainedModel): def __init__( self, config: SeamlessM4TConfig, embed_tokens_decoder: Optional[nn.Embedding] = None, ): super().__init__(config) self.encoder = SeamlessM4TEncoder(config, is_t2u_encoder=True) self.decoder = SeamlessM4TDecoder(config, embed_tokens_decoder) # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4TTextToUnit`].", SEAMLESS_M4T_START_DOCSTRING, """ embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder. 
""", ) class SeamlessM4TTextToUnitForConditionalGeneration(SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = [ "vocoder", "speech_encoder", "text_encoder", "text_decoder", ] _tied_weights_keys = ["decoder.embed_tokens.weight", "lm_head.weight"] def __init__( self, config: SeamlessM4TConfig, embed_tokens_decoder: Optional[nn.Embedding] = None, ): # update config - used principaly for bos_token_id etc. config = copy.deepcopy(config) for param, val in config.to_dict().items(): if param.startswith("t2u_"): config.__setattr__(param[4:], val) super().__init__(config) self.model = SeamlessM4TTextToUnitModel(config, embed_tokens_decoder) self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, 
encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past def _tie_weights(self) -> None: if getattr(self.config, "tie_word_embeddings", True): output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) ############ VOCODER related code ################ HIFIGAN_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`SeamlessM4TConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
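The residual blocks defined below keep the sequence length constant by using "same" padding for their stride-1 dilated convolutions. A quick check of the `get_padding` formula they rely on:

```python
def get_padding(kernel_size, dilation=1):
    # for stride-1 convs: L_out = L_in + 2 * pad - dilation * (kernel_size - 1),
    # so pad = dilation * (kernel_size - 1) / 2 keeps L_out == L_in
    return (kernel_size * dilation - dilation) // 2

assert get_padding(3, 1) == 1
assert get_padding(3, 5) == 5
assert get_padding(7) == 3
```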
""" # Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock class HifiGanResidualBlock(nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1): super().__init__() self.leaky_relu_slope = leaky_relu_slope self.convs1 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, stride=1, dilation=dilation[i], padding=self.get_padding(kernel_size, dilation[i]), ) for i in range(len(dilation)) ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, stride=1, dilation=1, padding=self.get_padding(kernel_size, 1), ) for _ in range(len(dilation)) ] ) def get_padding(self, kernel_size, dilation=1): return (kernel_size * dilation - dilation) // 2 def apply_weight_norm(self): for layer in self.convs1: nn.utils.weight_norm(layer) for layer in self.convs2: nn.utils.weight_norm(layer) def remove_weight_norm(self): for layer in self.convs1: nn.utils.remove_weight_norm(layer) for layer in self.convs2: nn.utils.remove_weight_norm(layer) def forward(self, hidden_states): for conv1, conv2 in zip(self.convs1, self.convs2): residual = hidden_states hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv1(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv2(hidden_states) hidden_states = hidden_states + residual return hidden_states class SeamlessM4TVariancePredictor(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.unit_embed_dim kernel_size = config.variance_predictor_kernel_size var_pred_dropout = config.var_pred_dropout self.conv1 = nn.Conv1d( embed_dim, embed_dim, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, ) self.activation_fuction = nn.ReLU() self.ln1 = nn.LayerNorm(embed_dim) self.dropout_module = nn.Dropout(p=var_pred_dropout) self.conv2 = nn.Conv1d( embed_dim, embed_dim, kernel_size=kernel_size, padding=1, ) self.ln2 = nn.LayerNorm(embed_dim) self.proj = nn.Linear(embed_dim, 1) def forward(self, hidden_states: Tensor) -> Tensor: # Input: B x T x C; Output: B x T hidden_states = self.conv1(hidden_states.transpose(1, 2)) hidden_states = self.activation_fuction(hidden_states).transpose(1, 2) hidden_states = self.dropout_module(self.ln1(hidden_states)) hidden_states = self.conv2(hidden_states.transpose(1, 2)) hidden_states = self.activation_fuction(hidden_states).transpose(1, 2) hidden_states = self.dropout_module(self.ln2(hidden_states)) return self.proj(hidden_states).squeeze(dim=2) class SeamlessM4THifiGan(nn.Module): def __init__(self, config: SeamlessM4TConfig): super().__init__() model_in_dim = config.unit_embed_dim + config.lang_embed_dim + config.spkr_embed_dim self.leaky_relu_slope = config.leaky_relu_slope self.num_kernels = len(config.resblock_kernel_sizes) self.num_upsamples = len(config.upsample_rates) self.conv_pre = nn.Conv1d( model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3, ) self.upsampler = nn.ModuleList() for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): self.upsampler.append( nn.ConvTranspose1d( config.upsample_initial_channel // (2**i), config.upsample_initial_channel // (2 ** (i + 1)), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2, ) ) self.resblocks = nn.ModuleList() for i in range(len(self.upsampler)): channels = config.upsample_initial_channel // (2 ** (i + 1)) for kernel_size, dilation in 
zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3) def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor: r""" Converts a sequence of input embeddings into a speech waveform. Passing a batch of input embeddings returns a batch of speech waveforms. Passing a single, un-batched sequence returns a single, un-batched speech waveform. Args: input_embeds (`torch.FloatTensor`): Tensor containing the concatenated unit, language and speaker embeddings. Can be batched and of shape `(batch_size, sequence_length, model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim` is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`. Returns: `torch.FloatTensor`: Tensor containing the speech waveform. If the input is batched, will be of shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`. """ hidden_states = self.conv_pre(input_embeds) for i in range(self.num_upsamples): hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.upsampler[i](hidden_states) res_state = self.resblocks[i * self.num_kernels](hidden_states) for j in range(1, self.num_kernels): res_state += self.resblocks[i * self.num_kernels + j](hidden_states) hidden_states = res_state / self.num_kernels hidden_states = nn.functional.leaky_relu(hidden_states) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) # remove seq-len dim since this collapses to 1 waveform = hidden_states.squeeze(1) return waveform @add_start_docstrings( """Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).""", HIFIGAN_START_DOCSTRING, ) class SeamlessM4TCodeHifiGan(PreTrainedModel): config_class = SeamlessM4TConfig main_input_name = "input_embeds" _no_split_modules = [] def __init__(self, config): super().__init__(config) self.pad_token_id = config.t2u_pad_token_id self.dur_predictor = SeamlessM4TVariancePredictor(config) self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim) self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim) self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim) self.hifi_gan = SeamlessM4THifiGan(config) # Initialize weights and apply final processing self.post_init() def _get_dur_output_lengths(self, input_ids, dur_out): """ Computes the output length after the duration layer.
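A toy example of this computation (values are made up): the output length is the cumulative predicted duration read at the clamped count of non-pad units.

```python
import torch

pad_token_id = 0  # illustrative
input_ids = torch.tensor([[5, 8, 9, pad_token_id]])
dur_out = torch.tensor([[2, 3, 1, 0]])  # predicted repeats per unit

unit_lengths = (input_ids != pad_token_id).sum(1)                  # tensor([3])
unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1)  # tensor([3])
cumulative = torch.cumsum(dur_out, dim=1)                          # tensor([[2, 5, 6, 6]])
lengths = cumulative.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze()  # tensor(6)
```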
""" unit_lengths = (input_ids != self.pad_token_id).sum(1) # take care of edge cases where no padding or too many padding unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1) cumulative_dur_out = torch.cumsum(dur_out, dim=1) unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze() return unit_lengths def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the hifigan convolutional layers """ def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return ( torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode="floor") + 1 ) def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1): return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1 # conv_pre input_lengths = _conv_out_length(input_lengths, 7, 1, 3) # upsampler for i, (upsample_rate, kernel_size) in enumerate( zip(self.config.upsample_rates, self.config.upsample_kernel_sizes) ): input_lengths = _transpose_conv_out_length( input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2 ) # resblock for i in range(len(self.config.upsample_rates)): for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes): for dil in dilation: input_lengths = _conv_out_length( input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil ) for dil in dilation: input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1) # conv_post input_lengths = _conv_out_length(input_lengths, 7, 1, 3) return input_lengths def forward( self, input_ids: torch.LongTensor, spkr_id: torch.Tensor, lang_id: torch.Tensor ) -> Tuple[torch.Tensor]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTextToUnitForConditionalGeneration`]. [What are input IDs?](../glossary#input-ids) spkr_id (`int`, *optional*): The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. tgt_lang (`str`, *optional*): The language id to use as target language for translation. """ hidden_states = self.unit_embedding(input_ids).transpose(1, 2) spkr = self.speaker_embedding(spkr_id).transpose(1, 2) lang = self.language_embedding(lang_id).transpose(1, 2) log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2)) dur_out = torch.clamp(torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1) # B x C x T if hidden_states.size(0) == 1: hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2) else: # if batched sample, need to interleave per sample, and pad -> loss of parallelism if hidden_states.shape[0] > 1 and self.training: logger.warning( """`self.training=True` and you use batching. 
You lose parallelism during the hifigan forward pass because the samples are interleaved.""" ) hidden_states = [ torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1) for (hidden_state, duration) in zip(hidden_states, dur_out) ] hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2) spkr = spkr.repeat(1, 1, hidden_states.shape[-1]) lang = lang.repeat(1, 1, hidden_states.shape[-1]) hidden_states = torch.cat([lang, hidden_states, spkr], dim=1) hidden_states = self.hifi_gan(hidden_states) unit_lengths = self._get_dur_output_lengths(input_ids, dur_out) lengths = self._get_output_hifigan_lengths(unit_lengths) return hidden_states, lengths def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def apply_weight_norm(self): nn.utils.weight_norm(self.hifi_gan.conv_pre) for layer in self.hifi_gan.upsampler: nn.utils.weight_norm(layer) for layer in self.hifi_gan.resblocks: layer.apply_weight_norm() nn.utils.weight_norm(self.hifi_gan.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.hifi_gan.conv_pre) for layer in self.hifi_gan.upsampler: nn.utils.remove_weight_norm(layer) for layer in self.hifi_gan.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.hifi_gan.conv_post) ############ WHOLE MODEL related code ################ @add_start_docstrings( "The text-to-text SeamlessM4T Model transformer which can be used for T2TT.", SEAMLESS_M4T_START_DOCSTRING, ) class SeamlessM4TForTextToText(SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = ["speech_encoder", "t2u_model", "vocoder"] main_input_name = "input_ids" _tied_weights_keys = [ "lm_head.weight", "text_encoder.embed_tokens.weight", "text_decoder.embed_tokens.weight", ] def __init__(self, config: SeamlessM4TConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.text_encoder = SeamlessM4TEncoder(config, self.shared) self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.text_encoder def get_decoder(self): return self.text_decoder def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.text_decoder.embed_tokens def set_input_embeddings(self, value): self.text_encoder.embed_tokens = value self.text_decoder.embed_tokens = value self.shared = value def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.lm_head, self.shared) @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: 
Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) encoder_attention_mask = attention_mask # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(decoder_outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: outputs = decoder_outputs + encoder_outputs output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def generate( self, input_ids=None, tgt_lang=None, generation_config=None, logits_processor=None, stopping_criteria=None, prefix_allowed_tokens_fn=None, synced_gpus=False, **kwargs, ): """ Generates sequences of token ids. 
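Example (a hedged usage sketch; the checkpoint name is an assumption, and any SeamlessM4T checkpoint whose generation config defines `text_decoder_lang_to_code_id` should work the same way):

```python
from transformers import AutoProcessor, SeamlessM4TForTextToText

checkpoint = "facebook/hf-seamless-m4t-medium"  # assumed checkpoint name
processor = AutoProcessor.from_pretrained(checkpoint)
model = SeamlessM4TForTextToText.from_pretrained(checkpoint)

inputs = processor(text="Hello, world!", src_lang="eng", return_tensors="pt")
output_tokens = model.generate(**inputs, tgt_lang="fra")
print(processor.decode(output_tokens[0], skip_special_tokens=True))
```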
<Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: input_ids (`torch.Tensor` of varying shape depending on the modality, *optional*): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) tgt_lang (`str`, *optional*): The language to use as target language for translation. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criteria is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constrains the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.
The possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ # prepare text_decoder_input_ids text_decoder_input_ids = kwargs.pop("decoder_input_ids", None) # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. if tgt_lang is not None: batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")) if hasattr(self.generation_config, "text_decoder_lang_to_code_id"): # also accept __xxx__ tgt_lang = tgt_lang.replace("__", "") if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id: raise ValueError( f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}""" ) # tgt_lang gets priority over decoder input ids text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) else: raise ValueError( """This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps the target language to the right token id. Make sure to load the right generation config.""" ) else: # only a warning, otherwise errors appear in the tests logger.warning( """You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get a correct generation, otherwise the generation will probably make no sense.""" ) return super().generate( input_ids, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, decoder_input_ids=text_decoder_input_ids, **kwargs, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. 
input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( "The speech-to-text SeamlessM4T Model transformer which can be used for S2TT.", SEAMLESS_M4T_START_DOCSTRING, ) class SeamlessM4TForSpeechToText(SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = ["text_decoder", "t2u_model", "vocoder"] main_input_name = "input_features" _tied_weights_keys = [ "lm_head.weight", "text_decoder.embed_tokens.weight", ] def __init__(self, config: SeamlessM4TConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.speech_encoder = SeamlessM4TSpeechEncoder(config) self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.speech_encoder def get_decoder(self): return self.text_decoder def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.text_decoder.embed_tokens def set_input_embeddings(self, value): self.text_decoder.embed_tokens = value def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.lm_head, self.shared) @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING) def forward( self, input_features: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.speech_encoder( input_features=input_features, attention_mask=attention_mask, 
inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) encoder_attention_mask = attention_mask if attention_mask is not None: sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( encoder_outputs[0].device ) encoder_attention_mask = _compute_new_attention_mask( hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(decoder_outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: outputs = decoder_outputs + encoder_outputs output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def generate( self, input_features=None, tgt_lang=None, generation_config=None, logits_processor=None, stopping_criteria=None, prefix_allowed_tokens_fn=None, synced_gpus=False, **kwargs, ): """ Generates sequences of token ids. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. tgt_lang (`str`, *optional*): The language to use as target language for translation. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them.
If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a generation config. If a stopping criterion is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constrains the beam search to allowed tokens only at each step. If not provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). synced_gpus (`bool`, *optional*, defaults to `False`): Whether to continue running the while loop until max_length (needed for ZeRO stage 3) kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`. The possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ text_decoder_input_ids = kwargs.pop("decoder_input_ids", None) # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. if tgt_lang is not None: inputs = kwargs.get("inputs_embeds") if input_features is None else input_features inputs = ( inputs if inputs is not None else kwargs.get("encoder_outputs", {"last_hidden_state": None})["last_hidden_state"] ) batch_size = len(inputs) if hasattr(self.generation_config, "text_decoder_lang_to_code_id"): # also accept __xxx__ tgt_lang = tgt_lang.replace("__", "") if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id: raise ValueError( f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}""" ) # tgt_lang gets priority over decoder input ids text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) else: raise ValueError( """This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps the target language to the right token id.
Make sure to load the right generation config.""" ) else: # only a warning, otherwise errors appear in the tests logger.warning( """You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get a correct generation, otherwise the generation will probably make no sense.""" ) return super().generate( input_features, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, decoder_input_ids=text_decoder_input_ids, **kwargs, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( "The text-to-speech SeamlessM4T Model transformer which can be used for T2ST.", SEAMLESS_M4T_START_DOCSTRING, ) class SeamlessM4TForTextToSpeech(SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = ["speech_encoder"] main_input_name = "input_ids" _tied_weights_keys = [ "lm_head.weight", "text_encoder.embed_tokens.weight", "text_decoder.embed_tokens.weight", ] def __init__(self, config: SeamlessM4TConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.text_encoder = SeamlessM4TEncoder(config, self.shared) self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config) self.vocoder = SeamlessM4TCodeHifiGan(config) def get_encoder(self): return self.text_encoder def get_decoder(self): return self.text_decoder def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.text_decoder.embed_tokens def set_input_embeddings(self, value): self.text_encoder.embed_tokens = value self.text_decoder.embed_tokens = value self.shared = value def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.lm_head, self.shared) @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, 
use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn logger.warning( "This is the same forward method as `SeamlessM4TForTextToText`. " "It doesn't use the text-to-unit model `SeamlessM4TTextToUnitForConditionalGeneration`. " "If you want to generate speech, use the `.generate` method." ) encoder_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) encoder_attention_mask = attention_mask # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(decoder_outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: outputs = decoder_outputs + encoder_outputs output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_ids: Optional[torch.Tensor] = None, return_intermediate_token_ids: Optional[bool] = None, tgt_lang: Optional[str] = None, spkr_id: Optional[int] = 0, **kwargs, ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]: """ Generates translated audio
waveforms. <Tip> This method successively calls the `.generate` function of two different sub-models. You can specify keyword arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments that will be passed to one of them. For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) return_intermediate_token_ids (`bool`, *optional*): If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want to get translated text alongside the audio. tgt_lang (`str`, *optional*): The language to use as target language for translation. spkr_id (`int`, *optional*, defaults to 0): The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, except for `decoder_input_ids` which will only be passed through the text components. - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the text model and speech model respectively. They take priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for one generation but not for the other. Returns: `Union[SeamlessM4TGenerationOutput, Tuple[Tensor]]`: - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`]. - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size, sequence_length)` and `waveform_lengths`, which gives the length of each sample. """ batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")) if tgt_lang is None: raise ValueError("You must specify a `tgt_lang` to generate translated speech.") else: # also accept __xxx__ tgt_lang = tgt_lang.replace("__", "") for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]: lang_code_to_id = getattr(self.generation_config, key, None) if lang_code_to_id is None: raise ValueError( f"""This model generation config doesn't have a `{key}` key which maps the target language to the right token id. Make sure to load the right generation config.""" ) elif tgt_lang not in lang_code_to_id: raise ValueError( f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}.
Note that SeamlessM4T supports more languages for text translation than for speech synthesis.""" ) kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) kwargs_text["output_hidden_states"] = True kwargs_text["return_dict_in_generate"] = True kwargs_text["output_scores"] = True text_decoder_input_ids = kwargs_text.get("decoder_input_ids") # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) kwargs_text["decoder_input_ids"] = text_decoder_input_ids # first generation text_generation_output = super().generate(input_ids, **kwargs_text) sequences = text_generation_output.sequences # prepare second generation num_return_sequences = len(sequences) // batch_size attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) encoder_hidden_states = text_generation_output.encoder_hidden_states[-1] # take care of num_return_sequences # take most probable hidden states per batch of return_sequences # (batch_size*num_return_sequences, ...) -> (batch_size,...) if num_return_sequences > 1: idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1) idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1) idx_most_probable_sequences_per_batch = ( idx_most_probable_sequences_per_batch + torch.arange(batch_size).to(self.device) * num_return_sequences ) sequences = sequences[idx_most_probable_sequences_per_batch] # get decoder last hidden state - must do a pass through the text decoder t2u_input_embeds = self.text_decoder( input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, ).last_hidden_state pad_token_id = self.generation_config.pad_token_id # Compute new attention mask seq_lens = (sequences != pad_token_id).int().sum(1) t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) kwargs_speech["attention_mask"] = t2u_model_attention_mask # Compute t2u decoder_input_ids t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids") t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang) t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size).to( self.device ) kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids # second generation unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech) output_unit_ids = unit_ids.detach().clone() # get rid of t2u_decoder_input_ids unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :] # replace eos with pad unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id # remove the offset of control symbols unit_ids = torch.where( unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset ) vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device) spkr_id = torch.tensor([[spkr_id]] * len(unit_ids)).to(self.device) waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id) if return_intermediate_token_ids: return SeamlessM4TGenerationOutput( waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids, )
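# Otherwise, return only the synthesized waveforms together with their unpadded per-sample lengths.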
return waveform, waveform_lengths def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( "The speech-to-speech SeamlessM4T Model transformer which can be used for S2ST.", SEAMLESS_M4T_START_DOCSTRING, ) class SeamlessM4TForSpeechToSpeech(SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = ["text_encoder"] main_input_name = "input_features" _tied_weights_keys = [ "lm_head.weight", "text_decoder.embed_tokens.weight", ] def __init__(self, config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.speech_encoder = SeamlessM4TSpeechEncoder(config) self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config) self.vocoder = SeamlessM4TCodeHifiGan(config) def get_encoder(self): return self.speech_encoder def get_decoder(self): return self.text_decoder def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.text_decoder.embed_tokens def set_input_embeddings(self, value): self.text_decoder.embed_tokens = value def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.lm_head, self.shared) @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING) def forward( self, input_features: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn logger.warning( "This is the same forward method as `SeamlessM4TForSpeechToText`. It doesn't use `self.t2u_model`. " "If you want to generate speech, use the `generate` method." ) encoder_outputs = self.speech_encoder( input_features=input_features, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) encoder_attention_mask = attention_mask if attention_mask is not None: sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( encoder_outputs[0].device ) encoder_attention_mask = _compute_new_attention_mask( hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths ) # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(decoder_outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: outputs = decoder_outputs + encoder_outputs output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_features: Optional[torch.Tensor] = None, return_intermediate_token_ids: Optional[bool] = None, tgt_lang: Optional[str] = None, spkr_id: Optional[int] = 0, **kwargs, ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]: """ Generates translated audio waveforms. <Tip> This method successively calls the `.generate` function of two different sub-models. You can specify keyword arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments that will be passed to one of them.
For example, calling `.generate(input_features, num_beams=4, speech_do_sample=True)` will successively perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. return_intermediate_token_ids (`bool`, *optional*): If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want to get translated text alongside the audio. tgt_lang (`str`, *optional*): The language to use as target language for translation. spkr_id (`int`, *optional*, defaults to 0): The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, except for `decoder_input_ids` which will only be passed through the text components. - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the text model and speech model respectively. They take priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for one generation but not for the other. Returns: `Union[SeamlessM4TGenerationOutput, Tuple[Tensor]]`: - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`]. - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size, sequence_length)` and `waveform_lengths`, which gives the length of each sample. """ batch_size = len(input_features) if input_features is not None else len(kwargs.get("inputs_embeds")) if tgt_lang is None: raise ValueError("You must specify a `tgt_lang` to generate translated speech.") else: # also accept __xxx__ tgt_lang = tgt_lang.replace("__", "") for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]: lang_code_to_id = getattr(self.generation_config, key, None) if lang_code_to_id is None: raise ValueError( f"""This model generation config doesn't have a `{key}` key which maps the target language to the right token id. Make sure to load the right generation config.""" ) elif tgt_lang not in lang_code_to_id: raise ValueError( f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports more languages for text translation than for speech synthesis.""" ) kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) kwargs_text["output_hidden_states"] = True kwargs_text["return_dict_in_generate"] = True kwargs_text["output_scores"] = True text_decoder_input_ids = kwargs_text.get("decoder_input_ids") # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
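# `tgt_lang` was validated as non-None above, so the text decoder is always primed with the target-language token id here.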
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) kwargs_text["decoder_input_ids"] = text_decoder_input_ids # first generation text_generation_output = super().generate(input_features, **kwargs_text) sequences = text_generation_output.sequences # prepare second generation num_return_sequences = len(sequences) // batch_size attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) # get last_hidden_state from encoder encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask)[0] # input modality = speech so new attention mask for the decoder if attention_mask is not None: sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( encoder_hidden_states.device ) attention_mask = _compute_new_attention_mask( hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths ) # take care of num_return_sequences # take most probable hidden states per batch of return_sequences # (batch_size*num_return_sequences, ...) -> (batch_size,...) if num_return_sequences > 1: idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1) idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1) idx_most_probable_sequences_per_batch = ( idx_most_probable_sequences_per_batch + torch.arange(batch_size).to(self.device) * num_return_sequences ) sequences = sequences[idx_most_probable_sequences_per_batch] # get decoder last hidden state - must do a pass through the text decoder t2u_input_embeds = self.text_decoder( input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, ).last_hidden_state pad_token_id = self.generation_config.pad_token_id # Compute new attention mask seq_lens = (sequences != pad_token_id).int().sum(1) t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) kwargs_speech["attention_mask"] = t2u_model_attention_mask # Compute t2u decoder_input_ids t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids") t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang) t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size).to( self.device ) kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids # second generation unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech) output_unit_ids = unit_ids.detach().clone() # get rid of t2u_decoder_input_ids unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :] # replace eos with pad unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id # remove the offset of control symbols unit_ids = torch.where( unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset ) vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device) spkr_id = torch.tensor([[spkr_id]] * len(unit_ids)).to(self.device) waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id) if return_intermediate_token_ids: return SeamlessM4TGenerationOutput( waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids, ) return waveform, waveform_lengths @staticmethod
def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } @add_start_docstrings( "The original SeamlessM4T Model transformer which can be used for every task available (S2ST, S2TT, T2TT, T2ST).", SEAMLESS_M4T_START_DOCSTRING, """ current_modality (`str`, *optional*, defaults to `"text"`): Default modality. Used to initialize the model. """, ) class SeamlessM4TModel(SeamlessM4TPreTrainedModel): _tied_weights_keys = [ "lm_head.weight", "text_encoder.embed_tokens.weight", "text_decoder.embed_tokens.weight", ] def __init__(self, config, current_modality="text"): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.text_encoder = SeamlessM4TEncoder(config, self.shared) self.speech_encoder = SeamlessM4TSpeechEncoder(config) self.text_decoder = SeamlessM4TDecoder(config, self.shared) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() self.current_modality = current_modality if current_modality == "speech": self.main_input_name = "input_features" # these models already call post_init in their initialization self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config) self.vocoder = SeamlessM4TCodeHifiGan(config) def set_modality(self, modality="text"): if modality == "text": self.main_input_name = "input_ids" self.current_modality = "text" elif modality == "speech": self.main_input_name = "input_features" self.current_modality = "speech" else: raise ValueError(f"`modality={modality}` is not a valid modality.
It must be `text` or `speech`.") def get_encoder(self): if self.current_modality == "text": return self.text_encoder else: return self.speech_encoder def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.text_decoder.embed_tokens def set_input_embeddings(self, value): self.text_encoder.embed_tokens = value self.text_decoder.embed_tokens = value self.shared = value def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.lm_head, self.shared) @add_start_docstrings_to_model_forward(M4T_MODEL_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, input_features: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) if input_ids is None and input_features is None and inputs_embeds is None and encoder_outputs is None: raise ValueError( "`input_ids`, `input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not." ) elif input_features is not None: if input_ids is not None: logger.warning( "`input_ids` is not `None` but `input_features` has been given. " "`input_features` will be used in priority through the `speech_encoder`. " "Make sure that `input_features` and `input_ids` are mutually exclusive." ) if inputs_embeds is not None: logger.warning( "`inputs_embeds` is not `None` but `input_features` has been given. " "`input_features` will be used in priority through `speech_encoder`. " "`inputs_embeds` will be ignored." ) # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn logger.warning( "This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText` " "depending on the input modality. If you want to generate speech, use the `generate` method."
) self.set_modality("speech") encoder_outputs = self.speech_encoder( input_features=input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif input_ids is not None or inputs_embeds is not None: # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn logger.warning( "This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText` " "depending on the input modality. If you want to generate speech, use the `generate` method." ) self.set_modality("text") encoder_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) encoder_attention_mask = attention_mask # input modality = speech so new attention mask if self.current_modality == "speech" and attention_mask is not None: sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( encoder_outputs[0].device ) encoder_attention_mask = _compute_new_attention_mask( hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths ) # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(decoder_outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(lm_logits.device) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: outputs = decoder_outputs + encoder_outputs output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_ids: Optional[torch.Tensor] = None, input_features: Optional[torch.Tensor] = None, return_intermediate_token_ids: Optional[bool] = None, tgt_lang: Optional[str] = None, spkr_id: Optional[int] = 0, generate_speech: Optional[bool] = True, **kwargs, ) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]: """ Generates translated token ids and/or translated audio waveforms. <Tip> This method successively calls the `.generate` function of two different sub-models.
You can specify keyword arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments that will be passed to one of them. For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*): Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. return_intermediate_token_ids (`bool`, *optional*): If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want to get translated text alongside the audio. Note that if `generate_speech=False`, this parameter will be ignored. tgt_lang (`str`, *optional*): The language to use as target language for translation. spkr_id (`int`, *optional*, defaults to 0): The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. generate_speech (`bool`, *optional*, defaults to `True`): If `False`, will only return the text tokens and won't generate speech. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, except for `decoder_input_ids` which will only be passed through the text components. - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the text model and speech model respectively. They take priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for one generation but not for the other. Returns: `Union[SeamlessM4TGenerationOutput, Tuple[Tensor], ModelOutput]`: - If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`]. - If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size, sequence_length)` and `waveform_lengths`, which gives the length of each sample. - If `generate_speech=False`, it returns a `ModelOutput`. """ if input_ids is None and input_features is None and kwargs.get("inputs_embeds", None) is None: raise ValueError( "`input_ids`, `input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not."
) if generate_speech and tgt_lang is None: raise ValueError("You must specify a `tgt_lang` to generate translated speech.") if tgt_lang is not None: # also accept __xxx__ tgt_lang = tgt_lang.replace("__", "") for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]: lang_code_to_id = getattr(self.generation_config, key, None) if lang_code_to_id is None: raise ValueError( f"""This model generation config doesn't have a `{key}` key which maps the target language to the right token id. Make sure to load the right generation config.""" ) elif tgt_lang not in lang_code_to_id: raise ValueError( f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4T supports more languages for text translation than for speech synthesis.""" ) batch_size = ( len(input_features) if input_features is not None else (len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))) ) kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) kwargs_text["output_hidden_states"] = True kwargs_text["return_dict_in_generate"] = True kwargs_text["output_scores"] = True text_decoder_input_ids = kwargs_text.get("decoder_input_ids") # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. if tgt_lang is not None: # tgt_lang gets priority over decoder input ids text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) kwargs_text["decoder_input_ids"] = text_decoder_input_ids # first generation if input_features is not None: self.set_modality("speech") if input_ids is not None: logger.warning( "`input_features` and `input_ids` are both non-empty. `input_features` will be used in priority " "through the speech encoder. Make sure `input_features=None` if you want to use the text encoder." ) text_generation_output = super().generate(input_features=input_features, **kwargs_text) else: self.set_modality("text") text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text) sequences = text_generation_output.sequences if not generate_speech: return text_generation_output # prepare second generation num_return_sequences = len(sequences) // batch_size attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) # get encoder last hidden states if self.current_modality == "speech": # get last_hidden_state from encoder - must do a pass through the speech encoder encoder_hidden_states = self.speech_encoder( input_features=input_features, attention_mask=attention_mask ).last_hidden_state # input modality = speech so new attention mask for the decoder if attention_mask is not None: sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( encoder_hidden_states.device ) attention_mask = _compute_new_attention_mask( hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths ) else: encoder_hidden_states = text_generation_output.encoder_hidden_states[-1] # take care of num_return_sequences # take most probable hidden states per batch of return_sequences # (batch_size*num_return_sequences, ...) -> (batch_size,...)
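# `sequences_scores` holds one score per returned hypothesis; reshaping it to (batch_size, num_return_sequences) and taking the argmax keeps only the highest-scoring hypothesis of each batch item for the speech pass.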
if num_return_sequences > 1: idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1) idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1) idx_most_probable_sequences_per_batch = ( idx_most_probable_sequences_per_batch + torch.arange(batch_size).to(self.device) * num_return_sequences ) sequences = sequences[idx_most_probable_sequences_per_batch] # get decoder last hidden state - must do a pass through the text decoder t2u_input_embeds = self.text_decoder( input_ids=sequences, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, ).last_hidden_state pad_token_id = self.generation_config.pad_token_id # Compute new attention mask seq_lens = (sequences != pad_token_id).int().sum(1) t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) kwargs_speech["attention_mask"] = t2u_model_attention_mask # Compute t2u decoder_input_ids t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids") t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang) t2u_decoder_input_ids = torch.tensor([[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size).to( self.device ) kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids # second generation unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech) output_unit_ids = unit_ids.detach().clone() # get rid of t2u_decoder_input_ids unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :] # replace eos with pad unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id # remove the offset of control symbols unit_ids = torch.where( unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset ) vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device) spkr_id = torch.tensor([[spkr_id]] * len(unit_ids)).to(self.device) waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id) if return_intermediate_token_ids: return SeamlessM4TGenerationOutput( waveform=waveform, waveform_lengths=waveform_lengths, sequences=sequences, unit_sequences=output_unit_ids, ) return waveform, waveform_lengths def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past
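# ---------------------------------------------------------------------------
# Minimal usage sketch for `SeamlessM4TModel.generate` (not part of the original
# file), illustrating how un-prefixed kwargs reach both sub-generations while
# `text_`-/`speech_`-prefixed kwargs target a single sub-model. The checkpoint
# name "facebook/hf-seamless-m4t-medium" and the 3-letter language codes below
# are assumptions; substitute any SeamlessM4T checkpoint you have access to.
if __name__ == "__main__":
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
    model = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")

    # Text-to-speech translation (T2ST): English text in, French audio out.
    inputs = processor(text="Hello, my dog is cute.", src_lang="eng", return_tensors="pt")

    # Beam search on the text pass only; the speech pass decodes without sampling.
    waveform, waveform_lengths = model.generate(
        **inputs, tgt_lang="fra", text_num_beams=4, speech_do_sample=False
    )
    print(waveform.shape, waveform_lengths)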
# coding=utf-8 # Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ SEW model configuration""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class SEWConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW [asapp/sew-tiny-100k](https://huggingface.co/asapp/sew-tiny-100k) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`SEWModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. squeeze_factor (`int`, *optional*, defaults to 2): Sequence length downsampling factor after the encoder and upsampling factor after the transformer. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`SEWForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_extract_activation (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespective of `mask_feature_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks` mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked.
            The masking procedure generates `mask_feature_prob * len(feature_axis) / mask_feature_length` independent
            masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of
            the vector span to be masked, *mask_feature_prob* should be `prob_vector_start * mask_feature_length`.
            Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            `mask_feature_prob * len(feature_axis) / mask_feature_length < mask_feature_min_masks`.
        ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`SEWForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`SEWForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`SEWForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.

    Example:

    ```python
    >>> from transformers import SEWConfig, SEWModel

    >>> # Initializing a SEW asapp/sew-tiny-100k style configuration
    >>> configuration = SEWConfig()

    >>> # Initializing a model (with random weights) from the asapp/sew-tiny-100k style configuration
    >>> model = SEWModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = 
num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.squeeze_factor = squeeze_factor self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. " "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, " f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) " f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # sequence classification self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
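A minimal usage sketch (not part of the original file) of the `inputs_to_logits_ratio` property above: with the default `conv_stride` of `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)` the strides multiply to `5 * 2**6 = 320`, so one feature-encoder frame covers 320 waveform samples (20 ms at 16 kHz audio). Everything below follows from the defaults defined in this file.

```python
from transformers import SEWConfig

config = SEWConfig()  # defaults from this file
assert config.inputs_to_logits_ratio == 320  # product of the default conv strides

num_samples = 16_000  # one second of 16 kHz audio
num_frames = num_samples // config.inputs_to_logits_ratio
print(num_frames)  # 50 frames; the encoder additionally downsamples by squeeze_factor=2 internally
```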
transformers/src/transformers/models/sew/configuration_sew.py/0
{ "file_path": "transformers/src/transformers/models/sew/configuration_sew.py", "repo_id": "transformers", "token_count": 5652 }
343
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2 checkpoint.""" import argparse import fairseq import torch from torch import nn from transformers import ( MBart50Tokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2Model, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights_wav2vec2(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor adapter = hf_model.adapter for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]): load_adapter(name, value, adapter, unused_weights) is_used = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """
    Copy/paste/tweak model's weights to transformers design.
""" # load configs encoder_config = Wav2Vec2Config.from_pretrained( encoder_config_path, add_adapter=True, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, token_token=True, output_hidden_size=encoder_output_dim, ) decoder_config = MBartConfig.from_pretrained(decoder_config_path) # load model model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, }, ) model = model[0].eval() # load feature extractor feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, token_token=True) # set weights for wav2vec2 encoder hf_encoder = Wav2Vec2Model(encoder_config) recursively_load_weights_wav2vec2(model.encoder, hf_encoder) # load decoder weights hf_decoder = MBartForCausalLM(decoder_config) missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False) logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}") logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}") hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder) hf_wav2vec.config.tie_word_embeddings = False tokenizer = MBart50Tokenizer(dict_path) tokenizer.save_pretrained(pytorch_dump_folder_path) config = hf_wav2vec.config.to_dict() config["pad_token_id"] = tokenizer.pad_token_id config["bos_token_id"] = tokenizer.bos_token_id config["eos_token_id"] = tokenizer.eos_token_id config["tokenizer_class"] = "mbart50" config["feature_extractor_type"] = "wav2vec2" config["decoder_start_token_id"] = tokenizer.eos_token_id config["forced_bos_token_id"] = 250004 config["forced_eos_token_id"] = tokenizer.eos_token_id hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) feature_extractor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config") args = parser.parse_args() convert_wav2vec2_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, 
encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
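A hedged usage sketch (not part of the original script; the dump-folder path below is a placeholder): once the conversion has run, the output folder should load like any `SpeechEncoderDecoderModel` checkpoint, with the feature extractor and tokenizer saved alongside it.

```python
import torch
from transformers import MBart50Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

dump = "path/to/pytorch_dump_folder"  # placeholder for the folder the script wrote
model = SpeechEncoderDecoderModel.from_pretrained(dump)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump)
tokenizer = MBart50Tokenizer.from_pretrained(dump)

speech = torch.zeros(16_000).numpy()  # dummy one second of 16 kHz audio
inputs = feature_extractor(speech, sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```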
transformers/src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py", "repo_id": "transformers", "token_count": 6577 }
344
# coding=utf-8 # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for Speech2Text2.""" import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json" ), }, "tokenizer_config_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json" ), }, "merges_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt" ), }, } BPE_TOKEN_MERGES = "</w>" BPE_TOKEN_VOCAB = "@@ " def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs # Speech2Text2 has no max input length PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024} class Speech2Text2Tokenizer(PreTrainedTokenizer): """ Constructs a Speech2Text2Tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): File containing the vocabulary. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs, ): self.do_lower_case = do_lower_case with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") self.bpe_ranks = None self.cache = None else: with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, ) @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> Dict: return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n " + BPE_TOKEN_MERGES: word = "\n" + BPE_TOKEN_MERGES if word.endswith(BPE_TOKEN_MERGES): word = word.replace(BPE_TOKEN_MERGES, "") word = word.replace(" ", BPE_TOKEN_VOCAB) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding. " "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." ) if self.do_lower_case: text = text.lower() text = text.split() split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string(self, tokens: List[str]) -> str: """ Converts a list of output tokens into a single string. 
""" # combine tokens string = " ".join(tokens) # make sure @@ tokens are concatenated string = "".join(string.split(BPE_TOKEN_VOCAB)) return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merges_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 if self.bpe_ranks is None: return (vocab_file,) with open(merges_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return (vocab_file, merges_file)
transformers/src/transformers/models/speech_to_text_2/tokenization_speech_to_text_2.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text_2/tokenization_speech_to_text_2.py", "repo_id": "transformers", "token_count": 4350 }
345
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ SqueezeBERT model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json" ), } class SqueezeBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SqueezeBERT [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SqueezeBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`SqueezeBertModel`] or
            [`TFSqueezeBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            The ID of the token in the word embedding to use as padding.
        embedding_size (`int`, *optional*, defaults to 768):
            The dimension of the word embedding vectors.
        q_groups (`int`, *optional*, defaults to 4):
            The number of groups in Q layer.
        k_groups (`int`, *optional*, defaults to 4):
            The number of groups in K layer.
        v_groups (`int`, *optional*, defaults to 4):
            The number of groups in V layer.
        post_attention_groups (`int`, *optional*, defaults to 1):
            The number of groups in the first feed forward network layer.
        intermediate_groups (`int`, *optional*, defaults to 4):
            The number of groups in the second feed forward network layer.
        output_groups (`int`, *optional*, defaults to 4):
            The number of groups in the third feed forward network layer.

    Examples:

    ```python
    >>> from transformers import SqueezeBertConfig, SqueezeBertModel

    >>> # Initializing a SqueezeBERT configuration
    >>> configuration = SqueezeBertConfig()

    >>> # Initializing a model (with random weights) from the configuration above
    >>> model = SqueezeBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```

    Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained
    checkpoints.
    """

    pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "squeezebert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        embedding_size=768,
        q_groups=4,
        k_groups=4,
        v_groups=4,
        post_attention_groups=1,
        intermediate_groups=4,
        output_groups=4,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups


# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Bert->SqueezeBert
class SqueezeBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
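The `*_groups` parameters above exist because SqueezeBERT implements its attention projections and feed-forward layers as grouped pointwise (1x1) convolutions. A back-of-the-envelope sketch (plain PyTorch, not code from this file) of the parameter saving at the default `hidden_size=768` and `q_groups=4`:

```python
import torch.nn as nn

dense = nn.Conv1d(768, 768, kernel_size=1, groups=1)    # ungrouped projection
grouped = nn.Conv1d(768, 768, kernel_size=1, groups=4)  # q_groups=4

print(sum(p.numel() for p in dense.parameters()))    # 590_592 = 768*768 + 768
print(sum(p.numel() for p in grouped.parameters()))  # 148_224 = 768*(768/4) + 768
```

Each group only mixes a quarter of the channels, which is where the roughly 4x reduction in projection parameters (and compute) comes from.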
transformers/src/transformers/models/squeezebert/configuration_squeezebert.py/0
{ "file_path": "transformers/src/transformers/models/squeezebert/configuration_squeezebert.py", "repo_id": "transformers", "token_count": 3050 }
346
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin2SR checkpoints from the original repository. URL: https://github.com/mv-lab/swin2sr""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor def get_config(checkpoint_url): config = Swin2SRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: config.upscale = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: config.upscale = 4 config.image_size = 48 config.upsampler = "pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: config.depths = [6, 6, 6, 6] config.embed_dim = 60 config.num_heads = [6, 6, 6, 6] config.upsampler = "pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: config.upscale = 4 config.upsampler = "nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: config.num_channels = 1 config.upscale = 1 config.image_size = 126 config.window_size = 7 config.img_range = 255.0 config.upsampler = "" return config def rename_key(name, config): if "patch_embed.proj" in name and "layers" not in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm") if "layers" in name: name = name.replace("layers", "encoder.stages") if "residual_group.blocks" in name: name = name.replace("residual_group.blocks", "layers") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "q_bias" in name: name = name.replace("q_bias", "query.bias") if "k_bias" in name: name = name.replace("k_bias", "key.bias") if "v_bias" in name: name = name.replace("v_bias", "value.bias") if "cpb_mlp" in name: name = name.replace("cpb_mlp", "continuous_position_bias_mlp") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "patch_embed.projection") if name == "norm.weight": name = "layernorm.weight" if name == "norm.bias": name = "layernorm.bias" if "conv_first" in name: name = name.replace("conv_first", "first_convolution") if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: name = name.replace("conv_last", "final_convolution") if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: name = name.replace("conv_before_upsample.0", "conv_before_upsample") if 
"upsample.0" in name: name = name.replace("upsample.0", "upsample.convolution_0") if "upsample.2" in name: name = name.replace("upsample.2", "upsample.convolution_1") name = "upsample." + name elif config.upsampler == "pixelshuffledirect": name = name.replace("upsample.0.weight", "upsample.conv.weight") name = name.replace("upsample.0.bias", "upsample.conv.bias") else: pass else: name = "swin2sr." + name return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") stage_num = int(key_split[1]) block_num = int(key_split[4]) dim = config.embed_dim if "weight" in key: orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight" ] = val[dim : dim * 2, :] orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias" ] = val[:dim] orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias" ] = val[dim : dim * 2] orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias" ] = val[-dim:] pass else: orig_state_dict[rename_key(key, config)] = val return orig_state_dict def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub): config = get_config(checkpoint_url) model = Swin2SRForImageSuperResolution(config) model.eval() state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") new_state_dict = convert_state_dict(state_dict, config) missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) if len(missing_keys) > 0: raise ValueError("Missing keys when converting: {}".format(missing_keys)) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict") # verify values url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = Swin2SRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values image_size = 126 if "Jpeg" in checkpoint_url else 256 transforms = Compose( [ Resize((image_size, image_size)), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) pixel_values = transforms(image).unsqueeze(0) if config.num_channels == 1: pixel_values = pixel_values[:, 0, :, :].unsqueeze(1) outputs = model(pixel_values) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: expected_shape = torch.Size([1, 3, 512, 512]) expected_slice = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: expected_shape = torch.Size([1, 3, 1024, 1024]) expected_slice = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here expected_shape = torch.Size([1, 3, 1024, 1024]) expected_slice = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif 
"Swin2SR_Lightweight_X2_64" in checkpoint_url: expected_shape = torch.Size([1, 3, 512, 512]) expected_slice = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: expected_shape = torch.Size([1, 3, 1024, 1024]) expected_slice = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3) print("Looks ok!") url_to_name = { "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } model_name = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub(f"caidas/{model_name}") processor.push_to_hub(f"caidas/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") args = parser.parse_args() convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py", "repo_id": "transformers", "token_count": 5318 }
347
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 TAPAS model.""" from __future__ import annotations import enum import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_tensorflow_probability_available, logging, replace_return_docstrings, requires_backends, ) from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) # soft dependency if is_tensorflow_probability_available(): try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow n = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: logger.error( "TAPAS models are not usable since `tensorflow_probability` can't be loaded. " "It seems you have `tensorflow_probability` installed with the wrong tensorflow version. " "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." 
) _CONFIG_FOR_DOC = "TapasConfig" _CHECKPOINT_FOR_DOC = "google/tapas-base" TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [ # large models "google/tapas-large", "google/tapas-large-finetuned-sqa", "google/tapas-large-finetuned-wtq", "google/tapas-large-finetuned-wikisql-supervised", "google/tapas-large-finetuned-tabfact", # base models "google/tapas-base", "google/tapas-base-finetuned-sqa", "google/tapas-base-finetuned-wtq", "google/tapas-base-finetuned-wikisql-supervised", "google/tapas-base-finetuned-tabfact", # small models "google/tapas-small", "google/tapas-small-finetuned-sqa", "google/tapas-small-finetuned-wtq", "google/tapas-small-finetuned-wikisql-supervised", "google/tapas-small-finetuned-tabfact", # mini models "google/tapas-mini", "google/tapas-mini-finetuned-sqa", "google/tapas-mini-finetuned-wtq", "google/tapas-mini-finetuned-wikisql-supervised", "google/tapas-mini-finetuned-tabfact", # tiny models "google/tapas-tiny", "google/tapas-tiny-finetuned-sqa", "google/tapas-tiny-finetuned-wtq", "google/tapas-tiny-finetuned-wikisql-supervised", "google/tapas-tiny-finetuned-tabfact", # See all TAPAS models at https://huggingface.co/models?filter=tapas ] EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TFTableQuestionAnsweringOutput(ModelOutput): """ Output type of [`TFTapasForQuestionAnswering`]. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`tf.Tensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None logits_aggregation: tf.Tensor | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None class TFTapasEmbeddings(keras.layers.Layer): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. 
""" def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.config = config self.number_of_token_type_embeddings = len(config.type_vocab_sizes) self.reset_position_index_per_cell = config.reset_position_index_per_cell self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) for i, type_vocab_size in enumerate(self.config.type_vocab_sizes): with tf.name_scope(f"token_type_embeddings_{i}"): setattr( self, f"token_type_embeddings_{i}", self.add_weight( name="embeddings", shape=[type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0) if position_ids is None: # create absolute position embeddings position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.broadcast_to(position_ids, shape=input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). 
First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) position_embeddings = tf.gather(self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i]) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Tapas class TFTapasSelfAttention(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFTapasModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Tapas class TFTapasSelfOutput(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Tapas class TFTapasAttention(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFTapasSelfAttention(config, name="self") self.dense_output = TFTapasSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if 
self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Tapas class TFTapasIntermediate(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Tapas class TFTapasOutput(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Tapas class TFTapasLayer(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.attention = TFTapasAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFTapasAttention(config, name="crossattention") self.intermediate = TFTapasIntermediate(config, name="intermediate") self.bert_output = TFTapasOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if 
past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Tapas class TFTapasEncoder(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFTapasLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if 
output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Tapas class TFTapasPooler(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
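        # Illustrative note: for hidden_states of shape (batch_size, seq_len,
        # hidden_size), the slice below keeps only the first ([CLS]) position,
        # so pooled_output has shape (batch_size, hidden_size).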
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Tapas class TFTapasPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Tapas class TFTapasLMPredictionHead(keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFTapasPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
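        # Illustrative note on the weight tying above: call() multiplies the
        # transformed hidden states by input_embeddings.weight (shape (vocab_size,
        # hidden_size)) with transpose_b=True, so the only decoder-specific
        # parameter is the per-token bias created in build().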
self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Tapas class TFTapasMLMHead(keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFTapasLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @keras_serializable class TFTapasMainLayer(keras.layers.Layer): config_class = TapasConfig def __init__(self, config: TapasConfig, add_pooling_layer: bool = True, **kwargs): requires_backends(self, "tensorflow_probability") super().__init__(**kwargs) self.config = config self.embeddings = TFTapasEmbeddings(config, name="embeddings") self.encoder = TFTapasEncoder(config, name="encoder") self.pooler = TFTapasPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [len(self.config.type_vocab_sizes)], value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
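        # Worked example (illustrative): an attention_mask row [1, 1, 0] becomes
        # (1.0 - [1, 1, 0]) * -10000.0 = [0, 0, -10000], which leaves attended
        # positions unchanged and pushes masked logits towards -inf before the softmax.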
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFTapasPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, 7), tf.int32, name="token_type_ids"), } TAPAS_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`TapasConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TAPAS_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0}, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.", TAPAS_START_DOCSTRING, ) class TFTapasModel(TFTapasPreTrainedModel): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.tapas = TFTapasMainLayer(config, name="tapas") @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasModel.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "tapas", None) is not None: with tf.name_scope(self.tapas.name): self.tapas.build(None) @add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING) class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TFTapasForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." 
) self.tapas = TFTapasMainLayer(config, add_pooling_layer=False, name="tapas") self.lm_head = TFTapasMLMHead(config, input_embeddings=self.tapas.embeddings, name="cls") def get_lm_head(self) -> keras.layers.Layer: return self.lm_head.predictions @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForMaskedLM >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> inputs = tokenizer( ... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="tf" ... ) >>> labels = tokenizer( ... table=table, queries="How many movies has George Clooney played in?", return_tensors="tf" ... 
)["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "tapas", None) is not None: with tf.name_scope(self.tapas.name): self.tapas.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) class TFTapasComputeTokenLogits(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.temperature = config.temperature # cell selection heads with tf.name_scope("output"): self.output_weights = self.add_weight( name="output_weights", shape=(config.hidden_size,), dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.output_bias = self.add_weight( name="output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output: tf.Tensor) -> tf.Tensor: """ Computes logits per token Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. Returns: logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (tf.einsum("bsj,j->bs", sequence_output, self.output_weights) + self.output_bias) / self.temperature return logits class TFTapasComputeColumnLogits(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) with tf.name_scope("column_output"): self.column_output_weights = self.add_weight( name="column_output_weights", shape=[config.hidden_size], dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.column_output_bias = self.add_weight( name="column_output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor: """ Computes the column logits. Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. cell_index (`ProductIndexMap`): Index that groups tokens into cells. cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). 
            allow_empty_column_selection (`bool`):
                Whether to allow the model to select no column at all

        Returns:
            column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for
            every example in the batch.
        """
        # First, compute the token logits (batch_size, seq_len) - without temperature
        token_logits = tf.einsum("bsj,j->bs", sequence_output, self.column_output_weights) + self.column_output_bias

        # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows)
        cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)

        # Finally, average the logits per column (batch_size, max_num_cols)
        column_index = cell_index.project_inner(cell_logits_index)
        column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)

        cell_count, _ = reduce_sum(cell_mask, column_index)
        column_logits /= cell_count + EPSILON_ZERO_DIVISION

        # Mask columns that do not appear in the example.
        is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0))
        column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32)

        if not allow_empty_column_selection:
            column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32)

        return column_logits


@add_start_docstrings(
    """
    Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables
    (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for
    SQA, WTQ or WikiSQL-supervised tasks.
    """,
    TAPAS_START_DOCSTRING,
)
class TFTapasForQuestionAnswering(TFTapasPreTrainedModel):
    def __init__(self, config: TapasConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        # base model
        self.tapas = TFTapasMainLayer(config, name="tapas")

        # dropout
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)

        self.compute_token_logits = TFTapasComputeTokenLogits(config, name="compute_token_logits")

        self.compute_column_logits = TFTapasComputeColumnLogits(config, name="compute_column_logits")

        if config.num_aggregation_labels > 0:
            self.aggregation_classifier = keras.layers.Dense(
                config.num_aggregation_labels,
                kernel_initializer=get_initializer(config.initializer_range),
                name="aggregation_classifier",
            )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFTableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        table_mask: np.ndarray | tf.Tensor | None = None,
        aggregation_labels: np.ndarray | tf.Tensor | None = None,
        float_answer: np.ndarray | tf.Tensor | None = None,
        numeric_values: np.ndarray | tf.Tensor | None = None,
        numeric_values_scale: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]:
        r"""
        table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
            Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and
            padding are 0.
labels (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using [`AutoTokenizer`]. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`, *optional*): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (`tf.Tensor` of shape `(batch_size, )`, *optional*): Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] # Construct indices for the table. if token_type_ids is None: token_type_ids = tf.fill(input_shape + [len(self.config.type_vocab_sizes)], 0) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] # Construct indices for the table. 
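        # Illustrative sketch (made-up 3-token example): with row_ids = [0, 1, 1] and
        # column_ids = [0, 2, 2], token 0 is a question token (id 0) and tokens 1-2
        # belong to the cell at (row 1, column 2); the IndexMaps below clamp the ids
        # to max_num_rows/max_num_columns and group tokens into per-cell segments.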
row_index = IndexMap( indices=tf.minimum(tf.cast(row_ids, tf.int32), self.config.max_num_rows - 1), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=tf.minimum(tf.cast(column_ids, tf.int32), self.config.max_num_columns - 1), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:-1] if attention_mask is None: attention_mask = tf.ones(input_shape) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids), tf.zeros_like(row_ids)) # <float32>[batch_size, seq_length] input_mask_float = tf.cast(attention_mask, tf.float32) table_mask_float = tf.cast(table_mask, tf.float32) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = self.compute_token_logits(sequence_output) # Compute logits per column. These are used to select a column. column_logits = None if self.config.select_one_column: column_logits = self.compute_column_logits( sequence_output, cell_index, cell_mask, self.config.allow_empty_column_selection ) # Aggregate logits. logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = tf.zeros(shape=(1,), dtype=tf.float32) calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( shape_list(labels)[0] == shape_list(float_answer)[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: aggregate_mask = None raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. 
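            # Illustrative note: dist_per_token above models each token logit as an
            # independent Bernoulli, e.g. a logit of 0.0 assigns log_prob(1) =
            # log(0.5) ~= -0.693; the branch below up-weights positive labels by
            # config.positive_label_weight and averages over non-padding tokens.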
selection_loss_per_example = None if not self.config.select_one_column: weight = tf.where( labels == 0, tf.ones_like(labels, dtype=tf.float32), self.config.positive_label_weight * tf.ones_like(labels, dtype=tf.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) / ( tf.reduce_sum(input_mask_float, axis=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += tf.reduce_mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += tf.reduce_mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. if aggregation_labels is not None: assert ( shape_list(labels)[0] == shape_list(aggregation_labels)[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: aggregation_labels = tf.zeros(shape_list(labels)[0], dtype=tf.int32) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert shape_list(numeric_values) == shape_list(numeric_values_scale) # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. 
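                        # Illustrative note (assuming the cutoff comes from
                        # config.answer_loss_cutoff): large_answer_loss_mask is 0.0 for
                        # examples whose regression loss exceeded the cutoff, so the
                        # multiply below removes their contribution entirely.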
per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the" " regression loss" ) total_loss += tf.reduce_mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = tf.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TFTableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "tapas", None) is not None: with tf.name_scope(self.tapas.name): self.tapas.build(None) if getattr(self, "compute_token_logits", None) is not None: with tf.name_scope(self.compute_token_logits.name): self.compute_token_logits.build(None) if getattr(self, "compute_column_logits", None) is not None: with tf.name_scope(self.compute_column_logits.name): self.compute_column_logits.build(None) if getattr(self, "aggregation_classifier", None) is not None: with tf.name_scope(self.aggregation_classifier.name): self.aggregation_classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). """, TAPAS_START_DOCSTRING, ) class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.tapas = TFTapasMainLayer(config, name="tapas") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. 
Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForSequenceClassification >>> import tensorflow as tf >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... ] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> labels = tf.convert_to_tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "tapas", None) is not None: with tf.name_scope(self.tapas.name): self.tapas.build(None) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap(object): """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index. Args: indices: <int32> Tensor of indices, same shape as `values`. num_segments: <int32> Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims: Python integer, the number of batch dimensions. The first `batch_dims` dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = tf.convert_to_tensor(indices) self.num_segments = tf.convert_to_tensor(num_segments) self.batch_dims = batch_dims def batch_shape(self): return tf.shape(self.indices)[: self.batch_dims] class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. 
        For example, if the inputs represent table cells indexed by rows and columns respectively, the output will be
        a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and
        {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to `outer_index.num_segments` *
        `inner_index.num_segments`.

        Args:
            outer_index: IndexMap.
            inner_index: IndexMap, must have the same shape as `outer_index`.
        """
        if outer_index.batch_dims != inner_index.batch_dims:
            raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.")

        super(ProductIndexMap, self).__init__(
            indices=(
                inner_index.indices
                + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype)
            ),
            num_segments=inner_index.num_segments * outer_index.num_segments,
            batch_dims=inner_index.batch_dims,
        )
        self.outer_index = outer_index
        self.inner_index = inner_index

    def project_outer(self, index):
        """Projects an index with the same index set onto the outer components."""
        return IndexMap(
            indices=tf.math.floordiv(index.indices, self.inner_index.num_segments),
            num_segments=self.outer_index.num_segments,
            batch_dims=index.batch_dims,
        )

    def project_inner(self, index):
        """Projects an index with the same index set onto the inner components."""
        return IndexMap(
            indices=tf.math.floormod(index.indices, self.inner_index.num_segments),
            num_segments=self.inner_index.num_segments,
            batch_dims=index.batch_dims,
        )


def gather(values, index, name="segmented_gather"):
    """
    Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up
    a value for that index in `values`. Two elements from the same segment always get assigned the same value.

    Args:
        values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values.
        index: [B1, ..., Bn, I1, ..., Ik] IndexMap.
        name: Name for the TensorFlow operation.

    Returns:
        [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values.
    """
    return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name)


def flatten(index, name="segmented_flatten"):
    """
    Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements
    distinct. The k-th batch element (counting from 1) will have its indices shifted by `num_segments` * (k - 1). The
    resulting IndexMap has `num_segments` equal to the original `num_segments` multiplied by the number of elements in
    the batch.

    Args:
        index: IndexMap to flatten.
        name: Name for the TensorFlow operation.

    Returns:
        The flattened IndexMap.
    """
    batch_size = tf.reduce_prod(index.batch_shape())
    offset = tf.range(batch_size) * index.num_segments
    offset = tf.reshape(offset, index.batch_shape())
    for _ in range(index.batch_dims, index.indices.shape.rank):
        offset = tf.expand_dims(offset, -1)

    indices = tf.cast(offset, index.indices.dtype) + index.indices
    return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0)


def range_index_map(batch_shape, num_segments, name="range_index_map"):
    """
    Constructs an index map equal to range(num_segments).

    Args:
        batch_shape (`tf.Tensor`):
            Batch shape
        num_segments (`int`):
            Number of segments
        name (`str`, *optional*, defaults to 'range_index_map'):
            Name for the operation. Currently not used

    Returns:
        (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
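
    Example (an illustrative sketch added for clarity; the values are made up):

    ```python
    # batch_shape=[2] and num_segments=3 give an IndexMap whose indices are
    # [[0, 1, 2], [0, 1, 2]]: every batch element receives the identity segmentation.
    index = range_index_map(tf.constant([2]), tf.constant(3))
    ```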
""" batch_shape = tf.convert_to_tensor(batch_shape) batch_shape.shape.assert_has_rank(1) num_segments = tf.convert_to_tensor(num_segments) num_segments.shape.assert_has_rank(0) indices = tf.range(num_segments) shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0) indices = tf.reshape(indices, shape) multiples = tf.concat([batch_shape, [1]], axis=0) indices = tf.tile(indices, multiples) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`tf.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = tf.shape(values)[index.indices.shape.rank :] flattened_shape = tf.concat([[-1], vector_shape], axis=0) flat_values = tf.reshape(values, flattened_shape) segment_means = segment_reduce_fn( data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments ) # Unflatten the values. new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0) output_values = tf.reshape(segment_means, new_shape) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name) def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. 
    Returns:
        A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn,
        num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name)


def reduce_max(values, index, name="segmented_reduce_max"):
    """
    Computes the maximum over segments. This operation computes the maximum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be an element-wise
          maximum of vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values whose maximum is computed.
        index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.
        name: Name for the TensorFlow ops.

    Returns:
        A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn,
        num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, tf.math.unsorted_segment_max, name)


def reduce_min(values, index, name="segmented_reduce_min"):
    """Computes the minimum over segments."""
    return _segment_reduce(values, index, tf.math.unsorted_segment_min, name)


def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):
    """
    Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood.
    The model first predicts a column and then selects cells within that column (conditioned on the column). Cells
    outside the selected column are never selected.

    Args:
        token_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the logits per token.
        column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`):
            Tensor containing the logits per column.
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        col_index (`IndexMap`):
            Index that groups tokens into columns.
        cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).

    Returns:
        selection_loss_per_example (`tf.Tensor` of shape `(batch_size,)`): Loss for each example. logits (`tf.Tensor`
        of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single
        column. Logits outside of the most likely column according to *column_logits* will be set to a very low value
        (such that the probabilities are 0).
    """
    # First find the column we should select. We use the column with maximum
    # number of selected cells.
    labels_per_column, _ = reduce_sum(tf.cast(labels, tf.float32), col_index)
    column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32)
    # Check if there are no selected cells in the column. In that case the model
    # should predict the special column id 0, which means "select nothing".
    no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0)
    column_label = tf.where(no_cell_selected, tf.zeros_like(column_label), column_label)

    column_dist = tfp.distributions.Categorical(logits=column_logits)
    column_loss_per_example = -column_dist.log_prob(column_label)

    # Reduce the labels and logits to per-cell from per-token.
    logits_per_cell, _ = reduce_mean(token_logits, cell_index)
    labels_per_cell, labels_index = reduce_max(tf.cast(labels, tf.int32), cell_index)

    # Mask for the selected column.
    column_id_for_cells = cell_index.project_inner(labels_index).indices
    column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)), tf.float32)

    # Compute the log-likelihood for cells, but only for the selected column.
    cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell)
    cell_log_prob = cell_dist.log_prob(labels_per_cell)
    cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1)
    # We need to normalize the loss by the number of cells in the column.
    cell_loss /= tf.reduce_sum(column_mask * cell_mask, axis=1) + EPSILON_ZERO_DIVISION

    selection_loss_per_example = column_loss_per_example
    selection_loss_per_example += tf.where(no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss)

    # Set the probs outside the selected column (selected by the *model*)
    # to 0. This ensures backwards compatibility with models that select
    # cells from multiple columns.
    selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32)
    selected_column_mask = tf.cast(
        tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id, axis=-1)), tf.float32
    )
    # Never select cells with the special column id 0.
    selected_column_mask = tf.where(
        tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask), selected_column_mask
    )
    logits_per_cell += CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)
    logits = gather(logits_per_cell, cell_index)

    return selection_loss_per_example, logits


def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier):
    """
    Finds examples where the model should select cells with no aggregation.

    Returns a mask that determines for which examples the model should select answers directly from the table, without
    any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only
    apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation
    case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the
    aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold
    for this is the hyperparameter *cell_selection_preference*.

    Args:
        answer (`tf.Tensor` of shape `(batch_size, )`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
            Output of the pooler (BertPooler) on top of the encoder layer.
        cell_selection_preference (`float`):
            Preference for cell selection in ambiguous cases.
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        aggregation_classifier (`tf.keras.layers.Dense`):
            Aggregation head
    Returns:
        aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use
        aggregation functions.
    """
    # tf.Tensor(batch_size,)
    aggregate_mask_init = tf.cast(tf.logical_not(tf.math.is_nan(answer)), tf.float32)
    logits_aggregation = aggregation_classifier(pooled_output)
    dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
    # Index 0 corresponds to "no aggregation".
    aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1)

    # Cell selection examples according to current model.
is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = tf.reduce_sum(labels, axis=1) > 0 aggregate_mask = tf.where( tf.logical_and(is_pred_cell_selection, is_cell_supervision_available), tf.zeros_like(aggregate_mask_init, dtype=tf.float32), aggregate_mask_init, ) aggregate_mask = tf.stop_gradient(aggregate_mask) return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = tf.zeros_like(aggregate_mask, dtype=tf.int32) else: # Use aggregation supervision as the target. target_aggregation = aggregation_labels one_hot_labels = tf.one_hot(target_aggregation, depth=num_aggregation_labels, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits_aggregation, axis=-1) # <float32>[batch_size] per_example_aggregation_intermediate = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. 
    return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask


def _calculate_aggregation_loss(
    logits_aggregation,
    aggregate_mask,
    aggregation_labels,
    use_answer_as_supervision,
    num_aggregation_labels,
    aggregation_loss_weight,
):
    """
    Calculates the aggregation loss per example.

    Args:
        logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`tf.Tensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (`tf.Tensor` of shape `(batch_size, )`):
            Aggregation function id for every example in the batch.
        use_answer_as_supervision (`bool`, *optional*):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (`int`, *optional*, defaults to 0):
            The number of aggregation operators to predict.
        aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
            Importance weight for the aggregation loss.

    Returns:
        aggregation_loss (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss per example.
    """
    per_example_aggregation_loss = _calculate_aggregation_loss_known(
        logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
    )

    if use_answer_as_supervision:
        # Add aggregation loss for numeric answers that need aggregation.
        per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)
    return aggregation_loss_weight * per_example_aggregation_loss


def _calculate_expected_result(
    dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
):
    """
    Calculates the expected result given cell and aggregation probabilities.

    Args:
        dist_per_cell (`tfp.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the hyperparameters of the model

    Returns:
        expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example.
    """
    if config.use_gumbel_for_cells:
        gumbel_dist = tfp.distributions.RelaxedBernoulli(
            # The token logits were already divided by the temperature and used for
            # computing cell selection errors so we need to multiply it again here
            config.temperature,
            logits=dist_per_cell.logits_parameter() * config.temperature,
        )
        scaled_probability_per_cell = gumbel_dist.sample()
    else:
        scaled_probability_per_cell = dist_per_cell.probs_parameter()

    # <float32>[batch_size, seq_length]
    scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
    count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1)
    numeric_values_masked = tf.where(
        tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values
    )  # Mask non-numeric table values to zero.
    sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1)
    avg_approximation = config.average_approximation_function
    if avg_approximation == AverageApproximationFunction.RATIO:
        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
        # The sum of all probabilities except those that correspond to other cells
        ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1
        average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1)
    elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
        # The sum of all probabilities except those that correspond to other cells
        ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1
        pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
        var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var
        multiplier = (var / tf.math.square(ex) + 1) / ex
        average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1)
    else:
        raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}")

    if config.use_gumbel_for_aggregation:
        gumbel_dist = tfp.distributions.RelaxedOneHotCategorical(
            config.aggregation_temperature, logits=logits_aggregation[:, 1:]
        )
        # <float32>[batch_size, num_aggregation_labels - 1]
        aggregation_op_only_probs = gumbel_dist.sample()
    else:
        # <float32>[batch_size, num_aggregation_labels - 1]
        aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1)
    all_results = tf.concat(
        [
            tf.expand_dims(sum_result, axis=1),
            tf.expand_dims(average_result, axis=1),
            tf.expand_dims(count_result, axis=1),
        ],
        axis=1,
    )
    expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1)
    return expected_result


def _calculate_regression_loss(
    answer,
    aggregate_mask,
    dist_per_cell,
    numeric_values,
    numeric_values_scale,
    input_mask_float,
    logits_aggregation,
    config,
):
    """
    Calculates the regression loss per example.

    Args:
        answer (`tf.Tensor` of shape `(batch_size,)`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        aggregate_mask (`tf.Tensor` of shape `(batch_size,)`):
            A mask set to 1 for examples that should use aggregation functions.
        dist_per_cell (`tfp.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the parameters of the model

    Returns:
        per_example_answer_loss_scaled (`tf.Tensor` of shape `(batch_size,)`): Scaled answer loss for each example in
        the batch. large_answer_loss_mask (`tf.Tensor` of shape `(batch_size,)`): A mask which is 1 for examples for
        which their answer loss is larger than the answer_loss_cutoff.
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # <float32>[batch_size] answer_masked = tf.where(tf.math.is_nan(answer), tf.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = tf.stop_gradient( tf.math.maximum(tf.math.abs(expected_result), tf.math.abs(answer_masked)) + EPSILON_ZERO_DIVISION ) normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = tf.compat.v1.losses.huber_loss( normalized_answer_masked * aggregate_mask, normalized_expected_result * aggregate_mask, delta=tf.cast(1.0, tf.float32), reduction=tf.losses.Reduction.NONE, ) else: per_example_answer_loss = tf.compat.v1.losses.huber_loss( answer_masked * aggregate_mask, expected_result * aggregate_mask, delta=tf.cast(config.huber_loss_delta, tf.float32), reduction=tf.losses.Reduction.NONE, ) if config.answer_loss_cutoff is None: large_answer_loss_mask = tf.ones_like(per_example_answer_loss, dtype=tf.float32) else: large_answer_loss_mask = tf.where( per_example_answer_loss > config.answer_loss_cutoff, tf.zeros_like(per_example_answer_loss, dtype=tf.float32), tf.ones_like(per_example_answer_loss, dtype=tf.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
transformers/src/transformers/models/tapas/modeling_tf_tapas.py/0
{ "file_path": "transformers/src/transformers/models/tapas/modeling_tf_tapas.py", "repo_id": "transformers", "token_count": 47664 }
348
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for TrOCR.
"""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class TrOCRProcessor(ProcessorMixin):
    r"""
    Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.

    [`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
    [`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`]
    for more information.

    Args:
        image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
            An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
        tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
            An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
        [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
        [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's
        [`~TrOCRTokenizer.__call__`]. Please refer to the docstrings of the above two methods for more information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`].
        Please refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
        TrOCR.
        """
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your image inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
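

# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Illustrates the `__call__` contract documented above: images are routed to the
# image processor, text to the tokenizer, and the tokenized ids are attached as
# `labels`. The checkpoint is the public "microsoft/trocr-base-handwritten" model;
# the image path is a placeholder you would replace with a real file.
if __name__ == "__main__":
    from PIL import Image

    processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
    image = Image.open("handwritten_line.png").convert("RGB")  # placeholder path
    inputs = processor(images=image, text="industry", return_tensors="pt")
    print(inputs["pixel_values"].shape)  # pixel values from the image processor
    print(inputs["labels"])  # token ids from the tokenizer, attached as labels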
transformers/src/transformers/models/trocr/processing_trocr.py/0
{ "file_path": "transformers/src/transformers/models/trocr/processing_trocr.py", "repo_id": "transformers", "token_count": 2190 }
349
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ConvNext + UperNet checkpoints from mmsegmentation.""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def get_upernet_config(model_name): auxiliary_in_channels = 384 if "tiny" in model_name: depths = [3, 3, 9, 3] hidden_sizes = [96, 192, 384, 768] if "small" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [96, 192, 384, 768] if "base" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [128, 256, 512, 1024] auxiliary_in_channels = 512 if "large" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [192, 384, 768, 1536] auxiliary_in_channels = 768 if "xlarge" in model_name: depths = [3, 3, 27, 3] hidden_sizes = [256, 512, 1024, 2048] auxiliary_in_channels = 1024 # set label information num_labels = 150 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} backbone_config = ConvNextConfig( depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"] ) config = UperNetConfig( backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, ) return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight")) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias")) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight")) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter")) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight")) 
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight")) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias")) if i > 0: rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight")) rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias")) rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight")) rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias")) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight")) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias")) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): model_name_to_url = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"] config = get_upernet_config(model_name) model = UperNetForSemanticSegmentation(config) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): val = state_dict.pop(key) if "bn" in key: key = key.replace("bn", "batch_norm") state_dict[key] = val # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) model.load_state_dict(state_dict) # verify on image url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = SegformerImageProcessor() pixel_values = processor(image, return_tensors="pt").pixel_values with 
torch.no_grad(): outputs = model(pixel_values) if model_name == "upernet-convnext-tiny": expected_slice = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": expected_slice = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": expected_slice = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": expected_slice = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": expected_slice = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:", outputs.logits[0, 0, :3, :3]) assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"openmmlab/{model_name}") processor.push_to_hub(f"openmmlab/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
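

# --- Hedged usage sketch (editorial addition, not part of the original script) ---
# The argument parser above implies an invocation along these lines (the dump
# folder is a placeholder):
#
#     python convert_convnext_upernet_to_pytorch.py \
#         --model_name upernet-convnext-tiny \
#         --pytorch_dump_folder_path ./upernet-convnext-tiny
#
# Once converted (or pushed to the hub by this script via --push_to_hub), the
# checkpoint can be loaded back with the regular `from_pretrained` API:
#
#     from transformers import UperNetForSemanticSegmentation
#
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")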
transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py", "repo_id": "transformers", "token_count": 4521 }
350
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" VisualBERT model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VisualBertModel`]. It is used to instantiate a
    VisualBERT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the VisualBERT
    [uclanlp/visualbert-vqa-coco-pre](https://huggingface.co/uclanlp/visualbert-vqa-coco-pre) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the VisualBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`VisualBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        visual_embedding_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the visual embeddings to be passed to the model.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`VisualBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        bypass_transformer (`bool`, *optional*, defaults to `False`):
            Whether or not the model should bypass the transformer for the visual embeddings. If set to `True`, the
            model directly concatenates the visual embeddings from [`VisualBertEmbeddings`] with text output from
            transformers, and then passes it to a self-attention layer.
        special_visual_initialize (`bool`, *optional*, defaults to `True`):
            Whether or not the visual token type and position type embedding weights should be initialized the same as
            the textual token type and position type embeddings. When set to `True`, the weights of the textual token
            type and position type embeddings are copied to the respective visual embedding layers.
Example: ```python >>> from transformers import VisualBertConfig, VisualBertModel >>> # Initializing a VisualBERT visualbert-vqa-coco-pre style configuration >>> configuration = VisualBertConfig.from_pretrained("uclanlp/visualbert-vqa-coco-pre") >>> # Initializing a model (with random weights) from the visualbert-vqa-coco-pre style configuration >>> model = VisualBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "visual_bert" def __init__( self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.visual_embedding_dim = visual_embedding_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.bypass_transformer = bypass_transformer self.special_visual_initialize = special_visual_initialize
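

# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Shows that `visual_embedding_dim` is configured independently of `hidden_size`,
# as documented above; 1024 is an arbitrary illustrative value, not a recommended
# setting for any released checkpoint.
if __name__ == "__main__":
    demo_config = VisualBertConfig(visual_embedding_dim=1024)
    print(demo_config.visual_embedding_dim, demo_config.hidden_size)  # 1024 768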
transformers/src/transformers/models/visual_bert/configuration_visual_bert.py/0
{ "file_path": "transformers/src/transformers/models/visual_bert/configuration_visual_bert.py", "repo_id": "transformers", "token_count": 2970 }
351
# coding=utf-8 # Copyright 2022 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ViT Hybrid model.""" import collections.abc import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from ...utils.backbone_utils import load_backbone from .configuration_vit_hybrid import ViTHybridConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ViTHybridConfig" # Base docstring _CHECKPOINT_FOR_DOC = "google/vit-hybrid-base-bit-384" _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/vit-hybrid-base-bit-384" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/vit-hybrid-base-bit-384", # See all ViT hybrid models at https://huggingface.co/models?filter=vit-hybrid ] class ViTHybridEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. """ # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.__init__ with ViT->ViTHybrid def __init__(self, config: ViTHybridConfig, use_mask_token: bool = False) -> None: super().__init__() self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = ViTHybridPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, 0] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] height = height // self.config.patch_size width = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 height, width = height + 0.1, width + 0.1 patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, scale_factor=(height / math.sqrt(num_positions), width / math.sqrt(num_positions)), mode="bicubic", align_corners=False, ) if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]: raise ValueError(f"Invalid height or width: {height}, {width}") patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class ViTHybridPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config, feature_size=None): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) self.backbone = load_backbone(config) if self.backbone.config.model_type != "bit": raise ValueError(f"Backbone model type {self.backbone.model_type} is not supported.") feature_dim = self.backbone.channels[-1] if feature_size is None: feature_map = config.backbone_featmap_shape feature_size = feature_map[-2:] feature_dim = feature_map[1] else: feature_size = ( feature_size if isinstance(feature_size, collections.abc.Iterable) else (feature_size, feature_size) ) feature_dim = self.backbone.channels[-1] self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.projection = nn.Conv2d(feature_dim, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) features = self.backbone(pixel_values).feature_maps[-1] embeddings = self.projection(features).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTHybrid class ViTHybridSelfAttention(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTHybrid class ViTHybridSelfOutput(nn.Module): """ The residual connection is defined in ViTHybridLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTHybrid class ViTHybridAttention(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.attention = ViTHybridSelfAttention(config) self.output = ViTHybridSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->ViTHybrid class ViTHybridIntermediate(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->ViTHybrid class ViTHybridOutput(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class ViTHybridLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ViTHybridAttention(config) self.intermediate = ViTHybridIntermediate(config) self.output = ViTHybridOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViTHybrid, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection # We assign to correct device for `accelerate`, check: https://github.com/huggingface/transformers/pull/20705/ hidden_states = attention_output + hidden_states.to(attention_output.device) # in ViTHybrid, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTHybrid class ViTHybridEncoder(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ViTHybridLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.vit.modeling_vit.ViTPreTrainedModel with ViT->ViTHybrid class ViTHybridPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ViTHybridConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["ViTHybridEmbeddings", "ViTHybridLayer"] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, ViTHybridEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype) module.cls_token.data = nn.init.trunc_normal_( module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.cls_token.dtype) VIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViTHybridConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTHybridImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare ViT Hybrid Model transformer outputting raw hidden-states without any specific head on top.", VIT_START_DOCSTRING, ) # Copied from transformers.models.vit.modeling_vit.ViTModel with ViT->ViTHybrid class ViTHybridModel(ViTHybridPreTrainedModel): def __init__(self, config: ViTHybridConfig, add_pooling_layer: bool = True, use_mask_token: bool = False): super().__init__(config) self.config = config self.embeddings = ViTHybridEmbeddings(config, use_mask_token=use_mask_token) self.encoder = ViTHybridEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = ViTHybridPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> ViTHybridPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?) 
expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype if pixel_values.dtype != expected_dtype: pixel_values = pixel_values.to(expected_dtype) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Copied from transformers.models.vit.modeling_vit.ViTPooler with ViT->ViTHybrid class ViTHybridPooler(nn.Module): def __init__(self, config: ViTHybridConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @add_start_docstrings( """ ViT Hybrid Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. """, VIT_START_DOCSTRING, ) # Copied from transformers.models.vit.modeling_vit.ViTForImageClassification with ViT->ViTHybrid class ViTHybridForImageClassification(ViTHybridPreTrainedModel): def __init__(self, config: ViTHybridConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.vit = ViTHybridModel(config, add_pooling_layer=False) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vit( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/vit_hybrid/modeling_vit_hybrid.py/0
{ "file_path": "transformers/src/transformers/models/vit_hybrid/modeling_vit_hybrid.py", "repo_id": "transformers", "token_count": 13320 }
352
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ViTMatte.""" from typing import List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import pad, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class VitMatteImageProcessor(BaseImageProcessor): r""" Constructs a ViTMatte image processor. Args: do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to make the width and height divisible by `size_divisibility`. Can be overridden by the `do_pad` parameter in the `preprocess` method. size_divisibility (`int`, *optional*, defaults to 32): The width and height of the image will be padded to be divisible by this number. 
""" model_input_names = ["pixel_values"] def __init__( self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: bool = True, size_divisibility: int = 32, **kwargs, ) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.do_normalize = do_normalize self.do_pad = do_pad self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.size_divisibility = size_divisibility def pad_image( self, image: np.ndarray, size_divisibility: int = 32, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Args: image (`np.ndarray`): Image to pad. size_divisibility (`int`, *optional*, defaults to 32): The width and height of the image will be padded to be divisible by this number. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image) height, width = get_image_size(image, input_data_format) if height % size_divisibility != 0 or width % size_divisibility != 0: pad_height = size_divisibility - height % size_divisibility pad_width = size_divisibility - width % size_divisibility padding = ((0, pad_height), (0, pad_width)) image = pad(image, padding=padding, data_format=data_format, input_data_format=input_data_format) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_data_format) return image def preprocess( self, images: ImageInput, trimaps: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, size_divisibility: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. trimaps (`ImageInput`): Trimap to preprocess. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. 
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image.
            size_divisibility (`int`, *optional*, defaults to `self.size_divisibility`):
                The size divisibility to pad the image to if `do_pad` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_pad = do_pad if do_pad is not None else self.do_pad
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size_divisibility = size_divisibility if size_divisibility is not None else self.size_divisibility

        images = make_list_of_images(images)
        trimaps = make_list_of_images(trimaps, expected_ndims=2)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if not valid_images(trimaps):
            raise ValueError(
                "Invalid trimap type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_pad and size_divisibility is None:
            raise ValueError("Size divisibility must be specified if do_pad is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images] trimaps = [to_numpy_array(trimap) for trimap in trimaps] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] trimaps = [ self.rescale(image=trimap, scale=rescale_factor, input_data_format=input_data_format) for trimap in trimaps ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] # concatenate images and trimaps images = [ np.concatenate([image, np.expand_dims(trimap, axis=-1)], axis=-1) for image, trimap in zip(images, trimaps) ] if do_pad: images = [ self.pad_image(image, size_divisibility=size_divisibility, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image=image, channel_dim=data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
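

# A minimal, self-contained sketch of driving the processor above. The random
# arrays merely stand in for a real RGB image and its trimap; the sizes are
# arbitrary and chosen only so that padding is exercised.
if __name__ == "__main__":
    rgb = np.random.randint(0, 256, size=(500, 333, 3), dtype=np.uint8)
    trimap = np.random.randint(0, 256, size=(500, 333), dtype=np.uint8)

    processor = VitMatteImageProcessor(size_divisibility=32)
    batch = processor(images=rgb, trimaps=trimap, return_tensors="np")

    # RGB and trimap are stacked into one 4-channel input, and 500x333 is
    # padded up to 512x352, the next multiples of 32.
    print(batch["pixel_values"].shape)  # (1, 4, 512, 352)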
transformers/src/transformers/models/vitmatte/image_processing_vitmatte.py/0
{ "file_path": "transformers/src/transformers/models/vitmatte/image_processing_vitmatte.py", "repo_id": "transformers", "token_count": 5615 }
353
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extractor class for Wav2Vec2 """ from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Wav2Vec2 feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, defaults to 0.0): The value that is used to fill the padding values. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly improve the performance for some models, *e.g.*, [wav2vec2-lv60](https://huggingface.co/models?search=lv60). return_attention_mask (`bool`, *optional*, defaults to `False`): Whether or not [`~Wav2Vec2FeatureExtractor.__call__`] should return `attention_mask`. <Tip> Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using `attention_mask`. For such models, `input_values` should simply be padded with 0 and no `attention_mask` should be passed. For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as [wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should be passed for batched inference. 
</Tip>""" model_input_names = ["input_values", "attention_mask"] def __init__( self, feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=False, do_normalize=True, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize @staticmethod def zero_mean_unit_var_norm( input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0 ) -> List[np.ndarray]: """ Every array in the list is normalized to have zero mean and unit variance """ if attention_mask is not None: attention_mask = np.array(attention_mask, np.int32) normed_input_values = [] for vector, length in zip(input_values, attention_mask.sum(-1)): normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: normed_slice[length:] = padding_value normed_input_values.append(normed_slice) else: normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. 
[What are attention masks?](../glossary#attention-mask) <Tip> Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using `attention_mask`. For such models, `input_values` should simply be padded with 0 and no `attention_mask` should be passed. For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as [wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should be passed for batched inference. </Tip> return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. padding_value (`float`, defaults to 0.0): """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) # always return batch if not is_batched: raw_speech = [raw_speech] # convert into correct format for padding encoded_inputs = BatchFeature({"input_values": raw_speech}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) # convert input values to correct format input_values = padded_inputs["input_values"] if not isinstance(input_values[0], np.ndarray): padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values] elif ( not isinstance(input_values, np.ndarray) and isinstance(input_values[0], np.ndarray) and input_values[0].dtype is np.dtype(np.float64) ): padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values] elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64): padded_inputs["input_values"] = input_values.astype(np.float32) # convert attention_mask to correct format attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] # zero-mean and unit-variance normalization if self.do_normalize: attention_mask = ( attention_mask if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD else None ) padded_inputs["input_values"] = self.zero_mean_unit_var_norm( padded_inputs["input_values"], attention_mask=attention_mask, 
padding_value=self.padding_value ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
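

# A minimal, self-contained sketch of featurizing a small batch with the
# extractor above. A sine tone and silence stand in for real 16 kHz mono
# speech; no pretrained checkpoint is involved.
if __name__ == "__main__":
    sampling_rate = 16000
    t = np.linspace(0.0, 1.0, sampling_rate, endpoint=False)
    speech = [
        np.sin(2 * np.pi * 440.0 * t).astype(np.float32),  # 1 s tone
        np.zeros(8000, dtype=np.float32),  # 0.5 s of silence
    ]

    extractor = Wav2Vec2FeatureExtractor(do_normalize=True, return_attention_mask=True)
    batch = extractor(speech, sampling_rate=sampling_rate, padding="longest", return_tensors="np")

    # Both clips are zero-padded to the longest one; the attention mask marks
    # which samples are real audio.
    print(batch["input_values"].shape)  # (2, 16000)
    print(batch["attention_mask"].sum(-1))  # [16000 8000]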
transformers/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py", "repo_id": "transformers", "token_count": 4795 }
354
# coding=utf-8
# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Wav2Vec2Phoneme."""

import json
import os
import sys
from dataclasses import dataclass
from itertools import groupby
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import (
    ModelOutput,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    logging,
    requires_backends,
    to_py_obj,
)


logger = logging.get_logger(__name__)


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
    if is_flax_available():
        import jax.numpy as jnp  # noqa: F401


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/wav2vec2-lv-60-espeak-cv-ft": (
            "https://huggingface.co/facebook/wav2vec2-lv-60-espeak-cv-ft/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/wav2vec2-lv-60-espeak-cv-ft": (
            "https://huggingface.co/facebook/wav2vec2-lv-60-espeak-cv-ft/resolve/main/tokenizer_config.json"
        ),
    },
}

# Wav2Vec2Phoneme has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/wav2vec2-lv-60-espeak-cv-ft": sys.maxsize}


ListOfDict = List[Dict[str, Union[int, str]]]


@dataclass
class Wav2Vec2PhonemeCTCTokenizerOutput(ModelOutput):
    """
    Output type of [`Wav2Vec2PhonemeCTCTokenizer`], with transcription.

    Args:
        text (list of `str` or `str`):
            Decoded logits in text form. Usually the speech transcription.
        char_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):
            Offsets of the decoded characters. In combination with sampling rate and model downsampling rate, char
            offsets can be used to compute time stamps for each character.
    """

    text: Union[List[str], str]
    char_offsets: Union[List[ListOfDict], ListOfDict] = None


class Wav2Vec2PhonemeCTCTokenizer(PreTrainedTokenizer):
    """
    Constructs a Wav2Vec2PhonemeCTC tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer
    to the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        do_phonemize (`bool`, *optional*, defaults to `True`):
            Whether the tokenizer should phonemize the input or not. Only if a sequence of phonemes is passed to the
            tokenizer, `do_phonemize` should be set to `False`.
        phonemizer_lang (`str`, *optional*, defaults to `"en-us"`):
            The language of the phoneme set to which the tokenizer should phonemize the input text.
        phonemizer_backend (`str`, *optional*, defaults to `"espeak"`):
            The backend phonemization library that shall be used by the phonemizer library. The default `"espeak"`
            backend relies on the espeak-ng engine. See the [phonemizer
            package](https://github.com/bootphon/phonemizer#readme) for more information.

        **kwargs
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        phone_delimiter_token=" ",
        word_delimiter_token=None,
        do_phonemize=True,
        phonemizer_lang="en-us",
        phonemizer_backend="espeak",
        **kwargs,
    ):
        self._word_delimiter_token = word_delimiter_token
        self._phone_delimiter_token = phone_delimiter_token

        self.do_phonemize = do_phonemize
        self.phonemizer_lang = phonemizer_lang
        self.phonemizer_backend = phonemizer_backend

        if do_phonemize:
            self.init_backend(self.phonemizer_lang)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            word_delimiter_token=word_delimiter_token,
            phone_delimiter_token=phone_delimiter_token,
            do_phonemize=do_phonemize,
            phonemizer_lang=phonemizer_lang,
            phonemizer_backend=phonemizer_backend,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        vocab = dict(self.encoder.copy())
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # Overwritten to never strip!
        to_add = []
        for token in new_tokens:
            if isinstance(token, str):
                to_add.append(AddedToken(token, rstrip=False, lstrip=False, normalized=True, special=special_tokens))
            else:
                to_add.append(token)

        return super()._add_tokens(to_add, special_tokens)

    def init_backend(self, phonemizer_lang: str):
        """
        Initializes the backend.

        Args:
            phonemizer_lang (`str`): The language to be used.
        """
        requires_backends(self, "phonemizer")
        from phonemizer.backend import BACKENDS

        self.backend = BACKENDS[self.phonemizer_backend](phonemizer_lang, language_switch="remove-flags")

    def prepare_for_tokenization(
        self,
        text: str,
        is_split_into_words: bool = False,
        phonemizer_lang: Optional[str] = None,
        do_phonemize: Optional[bool] = None,
    ) -> Tuple[str, Dict[str, Any]]:
        """
        Performs any necessary transformations before tokenization.

        This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
        `kwargs` at the end of the encoding process to be sure all the arguments have been used.

        Args:
            text (`str`):
                The text to prepare.
            is_split_into_words (`bool`, *optional*, defaults to `False`):
                Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
                tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
                which it will tokenize.
                This is useful for NER or token classification.
            phonemizer_lang (`str`, *optional*):
                The language of the phoneme set to which the tokenizer should phonemize the input text.
            do_phonemize (`bool`, *optional*):
                Whether the tokenizer should phonemize the input text or not. Only if a sequence of phonemes is
                passed to the tokenizer, `do_phonemize` should be set to `False`.

        Returns:
            `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
        """
        if is_split_into_words:
            text = " " + text

        # set whether tokenizer should phonemize or not
        if do_phonemize is not None:
            self.do_phonemize = do_phonemize

        # set the correct phonemizer language
        if phonemizer_lang is not None:
            self.phonemizer_lang = phonemizer_lang
            self.init_backend(phonemizer_lang)

        return (text, {})

    def _tokenize(self, text, **kwargs):
        """
        Converts a string into a sequence of tokens (string), using the tokenizer.
        """

        # make sure whitespace is stripped to prevent <unk>
        text = text.strip()

        # phonemize
        if self.do_phonemize:
            text = text.lower()

            # create list of phonemes
            text = self.phonemize(text, self.phonemizer_lang)

        # make sure ' ' is between phonemes
        tokens = text.split(" ")

        tokens = list(filter(lambda p: p.strip() != "", tokens))
        return tokens

    def phonemize(self, text: str, phonemizer_lang: Optional[str] = None) -> str:
        from phonemizer.separator import Separator

        word_delimiter = self.word_delimiter_token + " " if self.word_delimiter_token is not None else ""
        if phonemizer_lang is not None and phonemizer_lang != self.phonemizer_lang:
            self.init_backend(phonemizer_lang)
        else:
            phonemizer_lang = self.phonemizer_lang

        separator = Separator(phone=self.phone_delimiter_token, word=word_delimiter, syllable="")
        phonemes = self.backend.phonemize(
            [text],
            separator=separator,
        )
        phonemes = phonemes[0].strip()

        return phonemes

    @property
    def word_delimiter_token(self) -> str:
        """
        `str`: Word delimiter token. Log an error if used while not having been set.
        """
        if self._word_delimiter_token is None:
            if self.verbose:
                logger.error("Using word_delimiter_token, but it is not set yet.")
            return None
        return str(self._word_delimiter_token)

    @property
    def word_delimiter_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been
        set.
        """
        if self._word_delimiter_token is None:
            return None
        return self.convert_tokens_to_ids(self.word_delimiter_token)

    @word_delimiter_token.setter
    def word_delimiter_token(self, value):
        self._word_delimiter_token = value

    @word_delimiter_token_id.setter
    def word_delimiter_token_id(self, value):
        self._word_delimiter_token = self.convert_tokens_to_ids(value)

    @property
    def phone_delimiter_token(self) -> str:
        """
        `str`: Phone delimiter token. Log an error if used while not having been set.
        """
        if self._phone_delimiter_token is None:
            if self.verbose:
                logger.error("Using phone_delimiter_token, but it is not set yet.")
            return None
        return str(self._phone_delimiter_token)

    @property
    def phone_delimiter_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the phone_delimiter_token in the vocabulary. Returns `None` if the token has not been
        set.
""" if self._phone_delimiter_token is None: return None return self.convert_tokens_to_ids(self.phone_delimiter_token) @phone_delimiter_token.setter def phone_delimiter_token(self, value): self._phone_delimiter_token = value @phone_delimiter_token_id.setter def phone_delimiter_token_id(self, value): self._phone_delimiter_token = self.convert_tokens_to_ids(value) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string( self, tokens: List[str], group_tokens: bool = True, spaces_between_special_tokens: bool = False, filter_word_delimiter_token: bool = True, output_char_offsets: bool = False, ) -> str: """ Converts a connectionist-temporal-classification (CTC) output tokens into a single string. """ # group same tokens into non-repeating tokens in CTC style decoding if group_tokens: chars, char_repetitions = zip(*((token, len(list(group_iter))) for token, group_iter in groupby(tokens))) else: chars = tokens char_repetitions = len(tokens) * [1] # filter self.pad_token which is used as CTC-blank token processed_chars = list(filter(lambda char: char != self.pad_token, chars)) # also filter self.word_delimiter_token if not not if filter_word_delimiter_token and self.word_delimiter_token is not None: processed_chars = list(filter(lambda token: token != self.word_delimiter_token, processed_chars)) # retrieve offsets char_offsets = None if output_char_offsets: word_delimiter_token_for_offsets = ( self.word_delimiter_token if filter_word_delimiter_token is True else None ) char_offsets = self._compute_offsets( char_repetitions, chars, self.pad_token, word_delimiter_token=word_delimiter_token_for_offsets ) if len(char_offsets) != len(processed_chars): raise ValueError( f"`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars}" " have to be of the same length, but are: `len(offsets)`: " f"{len(char_offsets)} and `len(processed_tokens)`: {len(processed_chars)}" ) # set tokens to correct processed token for i, char in enumerate(processed_chars): char_offsets[i]["char"] = char string = " ".join(processed_chars).strip() return {"text": string, "char_offsets": char_offsets} @staticmethod def _compute_offsets( char_repetitions: List[int], chars: List[str], ctc_token: int, word_delimiter_token: Optional[int] = None ) -> List[Dict[str, Union[str, int]]]: end_indices = np.asarray(char_repetitions).cumsum() start_indices = np.concatenate(([0], end_indices[:-1])) offsets = [ {"char": t, "start_offset": s, "end_offset": e} for t, s, e in zip(chars, start_indices, end_indices) ] # filter out CTC token offsets = list(filter(lambda offsets: offsets["char"] != ctc_token, offsets)) # filter out word delimiter token if necessary if word_delimiter_token is not None: offsets = list(filter(lambda offsets: offsets["char"] != word_delimiter_token, offsets)) return offsets def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, group_tokens: bool = True, filter_word_delimiter_token: bool = True, spaces_between_special_tokens: bool = False, output_char_offsets: bool = False, ) -> str: """ special _decode function is needed for Wav2Vec2PhonemeTokenizer because added tokens should be treated exactly the same as tokens 
of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on the whole token list and not individually on added tokens """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) result = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue result.append(token) string_output = self.convert_tokens_to_string( result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, filter_word_delimiter_token=filter_word_delimiter_token, output_char_offsets=output_char_offsets, ) text = string_output["text"] clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: text = self.clean_up_tokenization(text) if output_char_offsets: return Wav2Vec2PhonemeCTCTokenizerOutput(text=text, char_offsets=string_output["char_offsets"]) else: return text # overwritten from `tokenization_utils_base.py` because we need docs for `output_char_offsets` here def decode( self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, output_char_offsets: bool = False, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. output_char_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters. <Tip> Please take a look at the Example of [`~models.wav2vec2.tokenization_wav2vec2.decode`] to better understand how to make use of `output_word_offsets`. [`~model.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.batch_decode`] works the same way with phonemes. </Tip> kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str` or [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`]: The decoded sentence. Will be a [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`] when `output_char_offsets == True`. 
""" # Convert inputs to python lists token_ids = to_py_obj(token_ids) return self._decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, **kwargs, ) # overwritten from `tokenization_utils_base.py` because tokenizer can output # `ModelOutput` which should not be a list for batched output and because # we need docs for `output_char_offsets` here def batch_decode( self, sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, output_char_offsets: bool = False, **kwargs, ) -> List[str]: """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. output_char_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters. <Tip> Please take a look at the Example of [`~models.wav2vec2.tokenization_wav2vec2.decode`] to better understand how to make use of `output_word_offsets`. [`~model.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.batch_decode`] works analogous with phonemes and batched output. </Tip> kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `List[str]` or [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`]: The decoded sentence. Will be a [`~models.wav2vec2.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`] when `output_char_offsets == True`. """ batch_decoded = [ self.decode( seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, **kwargs, ) for seq in sequences ] if output_char_offsets: # transform list of dicts to dict of lists return Wav2Vec2PhonemeCTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]}) return batch_decoded def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,)
transformers/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py", "repo_id": "transformers", "token_count": 10427 }
355
# coding=utf-8 # Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Whisper model.""" import math from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, SequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from .configuration_whisper import WhisperConfig from .generation_whisper import WhisperGenerationMixin if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 _CONFIG_FOR_DOC = "WhisperConfig" _CHECKPOINT_FOR_DOC = "openai/whisper-tiny" WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "openai/whisper-base", # See all Whisper models at https://huggingface.co/models?filter=whisper ] # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) def sinusoids(length: int, channels: int, max_timescale: float = 10000) -> torch.Tensor: """Returns sinusoids for positional embedding""" if channels % 2 != 0: raise ValueError( f"Number of channels has to be divisible by 2 for sinusoidal positional embeddings, got {channels} channels." ) log_timescale_increment = math.log(max_timescale) / (channels // 2 - 1) inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2)) scaled_time = torch.arange(length).view(-1, 1) * inv_timescales.view(1, -1) return torch.cat([scaled_time.sin(), scaled_time.cos()], dim=1) # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. 
""" shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask class WhisperPositionalEmbedding(nn.Embedding): def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__(num_positions, embedding_dim) def forward(self, input_ids, past_key_values_length=0, position_ids=None): if position_ids is None: return self.weight[past_key_values_length : past_key_values_length + input_ids.shape[1]] else: return self.weight[position_ids] class WhisperAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[WhisperConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) # Copied from transformers.models.bart.modeling_bart.BartAttention._shape with BART->whisper def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() # Copied from transformers.models.bart.modeling_bart.BartAttention.forward with BART->whisper def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->Whisper class WhisperFlashAttention2(WhisperAttention): """ Whisper flash attention module. This module inherits from `WhisperAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. 
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # WhisperFlashAttention2 attention does not support output_attentions if output_attentions: raise ValueError("WhisperFlashAttention2 attention does not support output_attentions") # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, q_len, _ = hidden_states.size() # get query proj query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: # cross_attentions key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: # self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. 
Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout ) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
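            # With bottom-right alignment, a single query token may attend to every key, so a
            # causal mask is a no-op when `query_length == 1`; disabling it also sidesteps the
            # incorrect top-left mask that flash_attn<2.1 produces in that case.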
causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) class WhisperSdpaAttention(WhisperAttention): # Copied from transformers.models.bart.modeling_bart.BartSdpaAttention.forward with BART->whisper, Bart->Whisper def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" if output_attentions or layer_head_mask is not None: # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented. logger.warning_once( "WhisperModel is using WhisperSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention" ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) return super().forward( hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask, # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577 attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1. is_causal=self.is_causal and attention_mask is None and tgt_len > 1, ) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
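        # (bsz, tgt_len, num_heads, head_dim) -> (bsz, tgt_len, embed_dim)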
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, None, past_key_value WHISPER_ATTENTION_CLASSES = { "eager": WhisperAttention, "flash_attention_2": WhisperFlashAttention2, "sdpa": WhisperSdpaAttention, } # Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Whisper, MBART->WHISPER class WhisperEncoderLayer(nn.Module): def __init__(self, config: WhisperConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = WHISPER_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
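
        Example (a minimal shape-level sketch; `WhisperEncoderLayer` is internal, so the import
        path below is the module path rather than the public API, and default `WhisperConfig`
        values are assumed):

        ```python
        >>> import torch
        >>> from transformers import WhisperConfig
        >>> from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer

        >>> config = WhisperConfig()
        >>> layer = WhisperEncoderLayer(config)
        >>> hidden_states = torch.randn(2, 10, config.d_model)
        >>> outputs = layer(hidden_states, attention_mask=None, layer_head_mask=None)
        >>> outputs[0].shape == hidden_states.shape
        True
        ```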
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Whisper, MBART->WHISPER class WhisperDecoderLayer(nn.Module): def __init__(self, config: WhisperConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = WHISPER_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = WHISPER_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. 
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class WhisperPreTrainedModel(PreTrainedModel): config_class = WhisperConfig base_model_prefix = "model" main_input_name = "input_features" supports_gradient_checkpointing = True _no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer"] _supports_flash_attn_2 = True _supports_sdpa = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, WhisperEncoder): with torch.no_grad(): embed_positions = module.embed_positions.weight embed_positions.copy_(sinusoids(*embed_positions.shape)) def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ input_lengths = (input_lengths - 
1) // 2 + 1 return input_lengths WHISPER_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`WhisperConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WHISPER_INPUTS_DOCSTRING = r""" Args: input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`): Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing *SpecAugment* data augmentation on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ WHISPER_ENCODER_INPUTS_DOCSTRING = r""" Args: input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`): Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class WhisperEncoder(WhisperPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`WhisperEncoderLayer`]. Args: config: WhisperConfig """ def __init__(self, config: WhisperConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1) self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1) self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim) self.embed_positions.requires_grad_(False) self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def get_input_embeddings(self) -> nn.Module: return self.conv1 def set_input_embeddings(self, value: nn.Module): self.conv1 = value def forward( self, input_features, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] attention_mask (`torch.Tensor`)`, *optional*): Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but it is not used. By default the silence in the input log mel spectrogram are ignored. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0] if input_features.shape[-1] != expected_seq_length: raise ValueError( f"Whisper expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}." ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict inputs_embeds = nn.functional.gelu(self.conv1(input_features)) inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds)) inputs_embeds = inputs_embeds.permute(0, 2, 1) embed_pos = self.embed_positions.weight hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, None, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, None, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class WhisperDecoder(WhisperPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`WhisperDecoderLayer`] Args: config: WhisperConfig """ main_input_name = "input_ids" def __init__(self, config: WhisperConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = WhisperPositionalEmbedding(self.max_target_positions, config.d_model) self.layers = nn.ModuleList([WhisperDecoderLayer(config) for _ in range(config.decoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self._use_sdpa = config._attn_implementation == "sdpa" self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, position_ids=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self._use_sdpa and head_mask is None and not output_attentions: # output_attentions=True & head_mask can not be supported when using SDPA. attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, inputs_embeds, past_key_values_length ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # embed positions if input_ids is not None: positions = self.embed_positions( input_ids, past_key_values_length=past_key_values_length, position_ids=position_ids ) else: positions = self.embed_positions( inputs_embeds, past_key_values_length=past_key_values_length, position_ids=position_ids ) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), ( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, None, # encoder attention mask head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, # past_key_value output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) class WhisperModel(WhisperPreTrainedModel): def __init__(self, config: WhisperConfig): super().__init__(config) self.encoder = WhisperEncoder(config) self.decoder = WhisperDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_encoder(self): """ Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will not be updated during training. 
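
        Example (a minimal sketch):

        ```python
        >>> from transformers import WhisperModel

        >>> model = WhisperModel.from_pretrained("openai/whisper-base")
        >>> model.freeze_encoder()
        >>> any(p.requires_grad for p in model.encoder.parameters())
        False
        ```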
""" self.encoder._freeze_parameters() def _mask_input_features( self, input_features: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). """ # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return input_features # generate indices & apply SpecAugment along time axis batch_size, hidden_size, sequence_length = input_features.size() if self.config.mask_time_prob > 0 and self.training: # generate indices & apply SpecAugment along time axis mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool) mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1) input_features[mask_time_indices] = 0 if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool) input_features[mask_feature_indices] = 0 return input_features @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_features: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None, decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, WhisperModel >>> from datasets import load_dataset >>> model = WhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else 
self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: input_features = self._mask_input_features(input_features, attention_mask=attention_mask) encoder_outputs = self.encoder( input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, position_ids=decoder_position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The Whisper Model with a language modeling head. Can be used for automatic speech recognition.", WHISPER_START_DOCSTRING, ) class WhisperForConditionalGeneration(WhisperGenerationMixin, WhisperPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["proj_out.weight"] def __init__(self, config: WhisperConfig): super().__init__(config) self.model = WhisperModel(config) self.proj_out = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.proj_out def set_output_embeddings(self, new_embeddings): self.proj_out = new_embeddings def get_input_embeddings(self) -> nn.Module: return self.model.get_input_embeddings() def freeze_encoder(self): """ Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will not be updated during training. 
""" self.model.encoder._freeze_parameters() @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_features: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None, decoder_position_ids: Optional[Tuple[torch.LongTensor]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import torch >>> from transformers import AutoProcessor, WhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> generated_ids = model.generate(inputs=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_features, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, decoder_position_ids=decoder_position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.proj_out(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.reshape(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, use_cache=None, encoder_outputs=None, attention_mask=None, decoder_attention_mask=None, **kwargs, ): decoder_position_ids = None if decoder_attention_mask is not None: decoder_position_ids = (decoder_attention_mask.cumsum(-1) - 1).clamp(min=0) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] if decoder_position_ids is not None and decoder_position_ids.shape[1] > decoder_input_ids.shape[1]: decoder_position_ids = decoder_position_ids[:, remove_prefix_length:] return { "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "use_cache": use_cache, "decoder_attention_mask": decoder_attention_mask, "decoder_position_ids": decoder_position_ids, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past class WhisperDecoderWrapper(WhisperPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) config.is_encoder_decoder = False self.decoder = WhisperDecoder(config) def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) @add_start_docstrings( """ Whisper decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings). """, WHISPER_START_DOCSTRING, ) class WhisperForCausalLM(WhisperPreTrainedModel): _tied_weights_keys = ["proj_out.weight"] main_input_name = "input_ids" def __init__(self, config): super().__init__(config) config.is_encoder_decoder = False self.model = WhisperDecoderWrapper(config) self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.proj_out def set_output_embeddings(self, new_embeddings): self.proj_out = new_embeddings def get_input_embeddings(self) -> nn.Module: return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:

        Example:

        ```python
        >>> from transformers import WhisperForCausalLM, WhisperForConditionalGeneration, WhisperProcessor
        >>> import torch
        >>> from datasets import load_dataset

        >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
        >>> assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> sample = ds[0]["audio"]
        >>> input_features = processor(
        ...     sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
        ... ).input_features

        >>> predicted_ids = model.generate(input_features, assistant_model=assistant_model)

        >>> # decode token ids to text
        >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
        >>> transcription
        ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # If the user passed a tuple or `BaseModelOutput` for encoder_outputs, we extract only the hidden states if isinstance(encoder_outputs, (BaseModelOutput, tuple, list)): encoder_outputs = encoder_outputs[0] # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_outputs, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.proj_out(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, use_cache=None, encoder_outputs=None, attention_mask=None, **kwargs, ): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "input_ids": input_ids, "use_cache": use_cache, "attention_mask": attention_mask, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( """ Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """, WHISPER_ENCODER_INPUTS_DOCSTRING, ) class WhisperForAudioClassification(WhisperPreTrainedModel): def __init__(self, config): super().__init__(config) self.encoder = WhisperEncoder(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_encoder(self): """ Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will not be updated during training. Only the projection layers and classification head will be updated. 
""" self.encoder._freeze_parameters() def get_input_embeddings(self) -> nn.Module: return self.encoder.get_input_embeddings() def set_input_embeddings(self, value: nn.Module): self.encoder.set_input_embeddings(value) @add_start_docstrings_to_model_forward(WHISPER_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_features: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, WhisperForAudioClassification >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id") >>> model = WhisperForAudioClassification.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id") >>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True) >>> sample = next(iter(ds)) >>> inputs = feature_extractor( ... sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="pt" ... ) >>> input_features = inputs.input_features >>> with torch.no_grad(): ... 
logits = model(input_features).logits

        >>> predicted_class_ids = torch.argmax(logits).item()
        >>> predicted_label = model.config.id2label[predicted_class_ids]
        >>> predicted_label
        'Afrikaans'
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if self.config.use_weighted_layer_sum:
            output_hidden_states = True
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_features,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

        if self.config.use_weighted_layer_sum:
            hidden_states = encoder_outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = encoder_outputs[0]

        hidden_states = self.projector(hidden_states)
        pooled_output = hidden_states.mean(dim=1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # move labels to correct device to enable PP
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + encoder_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
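

# A minimal standalone sketch of the weighted layer sum used in
# `WhisperForAudioClassification.forward` above. The shapes are arbitrary demo
# values, not anything prescribed by the model.
if __name__ == "__main__":
    import torch
    from torch import nn

    num_layers, batch, seq_len, hidden = 5, 2, 10, 8
    # One hidden-state tensor per layer, stacked on a new layer axis (dim=1)
    hidden_states = torch.stack(
        [torch.randn(batch, seq_len, hidden) for _ in range(num_layers)], dim=1
    )
    layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
    # Softmax-normalize the learnable weights, then average the layers with them
    norm_weights = nn.functional.softmax(layer_weights, dim=-1)
    pooled = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
    assert pooled.shape == (batch, seq_len, hidden)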
# coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for XGLM.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xglm import XGLMTokenizer else: XGLMTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/xglm-564M": 2048, } class XGLMTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" XGLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. 
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = XGLMTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", **kwargs, ): # Compatibility with the original tokenizer self.num_madeup_words = 7 madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)] kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or [] kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.sep_token_id] + token_ids_0 sep = [self.sep_token_id] return sep + token_ids_0 + sep + sep + token_ids_1 def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] if token_ids_1 is None: return len(sep + token_ids_0) * [0] return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
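

# A minimal sketch of the special-token layout produced by
# `build_inputs_with_special_tokens` above. The ids are made-up stand-ins; a
# real run would load them from a pretrained XGLM checkpoint.
if __name__ == "__main__":
    sep_token_id = 2  # assumed id for "</s>"
    token_ids_0, token_ids_1 = [10, 11], [20, 21]
    single = [sep_token_id] + token_ids_0
    pair = [sep_token_id] + token_ids_0 + [sep_token_id, sep_token_id] + token_ids_1
    print(single)  # [2, 10, 11] -> "</s> A"
    print(pair)  # [2, 10, 11, 2, 2, 20, 21] -> "</s> A </s></s> B"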
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for XLM-RoBERTa model.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class XLMRobertaTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: # TODO check if the t5/llama PR also applies here return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ctypes import c_float, sizeof from enum import Enum from typing import TYPE_CHECKING, Optional, Union if TYPE_CHECKING: from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore class ParameterFormat(Enum): Float = c_float @property def size(self) -> int: """ Number of byte required for this data type Returns: Integer > 0 """ return sizeof(self.value) def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int: """ Args: dimension: fixed_dimension: num_token_to_add: Returns: """ # < 0 is possible if using a dynamic axis if dimension <= 0: dimension = fixed_dimension dimension -= num_token_to_add return dimension def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int: """ Compute the size taken by all the parameters in the given the storage format when serializing the model Args: num_parameters: Number of parameters to be saved dtype: The data format each parameter will be saved Returns: Size (in byte) taken to save all the parameters """ return num_parameters * dtype.size def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]: """ Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor are loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found. """ # Avoid circular imports by only importing this here. from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore try: return AutoProcessor.from_pretrained(model_name) except (ValueError, OSError, KeyError): tokenizer, feature_extractor = None, None try: tokenizer = AutoTokenizer.from_pretrained(model_name) except (OSError, KeyError): pass try: feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) except (OSError, KeyError): pass if tokenizer is not None and feature_extractor is not None: raise ValueError( f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor." ) elif tokenizer is None and feature_extractor is None: return None elif tokenizer is not None: return tokenizer else: return feature_extractor
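

# A minimal sketch using the helpers above to estimate the on-disk size of a
# serialized model; the parameter count is an arbitrary example value.
if __name__ == "__main__":
    num_parameters = 125_000_000  # e.g. a ~125M-parameter model
    size_bytes = compute_serialized_parameters_size(num_parameters, ParameterFormat.Float)
    # float32 is 4 bytes per parameter, so this prints roughly 476.8 MiB
    print(f"{size_bytes / (1024 ** 2):.1f} MiB")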
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True)) class ImageToTextPipeline(Pipeline): """ Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image. Example: ```python >>> from transformers import pipeline >>> captioner = pipeline(model="ydshieh/vit-gpt2-coco-en") >>> captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'generated_text': 'two birds are standing next to each other '}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image to text pipeline can currently be loaded from pipeline() using the following task identifier: "image-to-text". See the list of available models on [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES ) def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None, timeout=None): forward_kwargs = {} preprocess_params = {} if prompt is not None: preprocess_params["prompt"] = prompt if timeout is not None: preprocess_params["timeout"] = timeout if generate_kwargs is not None: forward_kwargs["generate_kwargs"] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: forward_kwargs["generate_kwargs"] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): """ Assign labels to the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a HTTP(s) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. max_new_tokens (`int`, *optional*): The amount of maximum tokens to generate. By default it will use `generate` default. generate_kwargs (`Dict`, *optional*): Pass it to send all of these arguments directly to `generate` allowing full control of this function. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following key: - **generated_text** (`str`) -- The generated text. 
""" return super().__call__(images, **kwargs) def preprocess(self, image, prompt=None, timeout=None): image = load_image(image, timeout=timeout) if prompt is not None: if not isinstance(prompt, str): raise ValueError( f"Received an invalid text input, got - {type(prompt)} - but expected a single string. " "Note also that one single text can be provided for conditional image to text generation." ) model_type = self.model.config.model_type if model_type == "git": model_inputs = self.image_processor(images=image, return_tensors=self.framework) input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids input_ids = [self.tokenizer.cls_token_id] + input_ids input_ids = torch.tensor(input_ids).unsqueeze(0) model_inputs.update({"input_ids": input_ids}) elif model_type == "pix2struct": model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation model_inputs = self.image_processor(images=image, return_tensors=self.framework) text_inputs = self.tokenizer(prompt, return_tensors=self.framework) model_inputs.update(text_inputs) else: raise ValueError(f"Model type {model_type} does not support conditional text generation") else: model_inputs = self.image_processor(images=image, return_tensors=self.framework) if self.model.config.model_type == "git" and prompt is None: model_inputs["input_ids"] = None return model_inputs def _forward(self, model_inputs, generate_kwargs=None): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"], list) and all(x is None for x in model_inputs["input_ids"]) ): model_inputs["input_ids"] = None if generate_kwargs is None: generate_kwargs = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. inputs = model_inputs.pop(self.model.main_input_name) model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs) return model_outputs def postprocess(self, model_outputs): records = [] for output_ids in model_outputs: record = { "generated_text": self.tokenizer.decode( output_ids, skip_special_tokens=True, ) } records.append(record) return records
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import ChunkPipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class ZeroShotObjectDetectionPipeline(ChunkPipeline): """ Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of objects when you provide an image and a set of `candidate_labels`. Example: ```python >>> from transformers import pipeline >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") >>> detector( ... "http://images.cocodataset.org/val2017/000000039769.jpg", ... candidate_labels=["cat", "couch"], ... ) [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}] >>> detector( ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", ... candidate_labels=["head", "bird"], ... ) [{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"zero-shot-object-detection"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection). """ def __init__(self, **kwargs): super().__init__(**kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES) def __call__( self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs, ): """ Detect objects (bounding boxes & classes) in the image(s) passed as inputs. Args: image (`str`, `PIL.Image` or `List[Dict[str, Any]]`): The pipeline handles three types of images: - A string containing an http url pointing to an image - A string containing a local path to an image - An image loaded in PIL directly You can use this parameter to send directly a list of images, or a dataset or a generator like so: ```python >>> from transformers import pipeline >>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") >>> detector( ... [ ... { ... "image": "http://images.cocodataset.org/val2017/000000039769.jpg", ... "candidate_labels": ["cat", "couch"], ... }, ... { ... "image": "http://images.cocodataset.org/val2017/000000039769.jpg", ... "candidate_labels": ["cat", "couch"], ... }, ... ] ... 
)
        [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
        ```

            candidate_labels (`str` or `List[str]` or `List[List[str]]`):
                What the model should recognize in the image.

            threshold (`float`, *optional*, defaults to 0.1):
                The probability necessary to make a prediction.

            top_k (`int`, *optional*, defaults to None):
                The number of top predictions that will be returned by the pipeline. If the provided number is `None`
                or higher than the number of predictions available, it will default to the number of predictions.

            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A list of lists containing prediction results, one list per input image. Each list contains dictionaries
            with the following keys:

            - **label** (`str`) -- Text query corresponding to the found object.
            - **score** (`float`) -- Score corresponding to the object (between 0 and 1).
            - **box** (`Dict[str, int]`) -- Bounding box of the detected object in the image's original size. It is a
              dictionary with `xmin`, `ymin`, `xmax`, `ymax` keys.
        """
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, timeout=None):
        image = load_image(inputs["image"], timeout=timeout)
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """
        Turns a tensor of coordinates [xmin, ymin, xmax, ymax] into a dict { "xmin": xmin, ... }

        Args:
            box (`torch.Tensor`): Tensor containing the coordinates in corners format.

        Returns:
            bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
        """
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
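# --- Usage sketch (illustrative, not part of the pipeline module above) ---
# A minimal end-to-end run of this pipeline. The checkpoint and labels mirror
# the docstring example; the call needs network access to fetch the image.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "couch"],
        threshold=0.1,  # forwarded to postprocess via _sanitize_parameters
        top_k=3,
    )
    for pred in predictions:
        box = pred["box"]
        print(f"{pred['label']}: {pred['score']:.3f} at ({box['xmin']}, {box['ymin']}, {box['xmax']}, {box['ymax']})")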
transformers/src/transformers/pipelines/zero_shot_object_detection.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .feature_extraction_utils import BatchFeature
from .tokenization_utils_base import BatchEncoding
from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly.

    Args:
        tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of.

    Returns:
        `List[int]`: The shape of the tensor as a list.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """
    Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is
    meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will
    be removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`; the wrapper relies on
    the fact that `softmax(x) = softmax(x + c)` (see
    https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).

    Args:
        logits (`tf.Tensor`):
            Must be one of the following types: half, float32, float64.
        axis (`int`, *optional*):
            The dimension softmax would be performed on. The default is -1 which indicates the last dimension.
        name (`str`, *optional*):
            A name for the operation.

    Returns:
        `tf.Tensor`:
            A Tensor. Has the same type and shape as logits.
    """
    # TODO: When the issue linked above gets sorted, add a check on TF version here and use the original function if
    # it has the fix. After we drop the support for unfixed versions, remove this function.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate the functionality of
    # PyTorch nn.functional.layer_norm when this is needed to port models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions on every dimension except
        # axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )

    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """
    Invert an attention mask (e.g., switches 0. and 1.).

    Args:
        encoder_attention_mask (`tf.Tensor`): An attention mask.

    Returns:
        `tf.Tensor`: The inverted attention mask.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """
    `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning
    zeros instead. This function adds a check against that dangerous silent behavior.

    Args:
        tensor (`tf.Tensor`): The tensor of indices to check.
        embed_dim (`int`): The embedding dimension.
        tensor_name (`str`, *optional*): The name of the tensor to use in the error message.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group.

    This method deals with an inherent problem of HDF5 file which is not able to store data larger than
    HDF5_OBJECT_HEADER_LIMIT bytes.

    Args:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to save.
        data: Attributes data to store.

    Raises:
        RuntimeError: If any single attribute is too large to be saved.

    Copied from Keras to Transformers to avoid versioning issues.
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` because in that case even chunking the
    # array would not make the saving possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data def load_attributes_from_hdf5_group(group, name): """Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data. Copied from Keras to Transformers to avoid versioning issues. """ if name in group.attrs: data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]] else: data = [] chunk_id = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def expand_1d(data): """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s. Copied from Keras to here to avoid versioning issues.""" def _expand_single_1d_tensor(t): if isinstance(t, tf.Tensor) and t.shape.rank == 1: return tf.expand_dims(t, axis=-1) return t return tf.nest.map_structure(_expand_single_1d_tensor, data) def convert_batch_encoding(*args, **kwargs): # Convert HF BatchEncoding/BatchFeature objects in the inputs to dicts that Keras understands if args and isinstance(args[0], (BatchEncoding, BatchFeature)): args = list(args) args[0] = dict(args[0]) elif "x" in kwargs and isinstance(kwargs["x"], (BatchEncoding, BatchFeature)): kwargs["x"] = dict(kwargs["x"]) return args, kwargs
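# --- Usage sketch (illustrative, not part of the module above) ---
# A minimal, self-contained exercise of the shape helpers, assuming only that
# TensorFlow is installed. `shape_list` returns static dimensions where known
# and symbolic ones otherwise; `flatten` mirrors `torch.flatten` semantics.
if __name__ == "__main__":
    x = tf.random.uniform((2, 3, 4))
    print(shape_list(x))  # [2, 3, 4] -- all dims are static for an eager tensor
    probs = stable_softmax(x, axis=-1)
    print(float(tf.reduce_sum(probs[0, 0])))  # ~1.0, a valid probability row
    print(flatten(x, start_dim=1).shape)  # (2, 12), like torch.flatten(x, 1)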
transformers/src/transformers/tf_utils.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx._compatibility import compatibility from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..pytorch_utils import is_torch_greater_or_equal_than_2_0 from ..utils import ( ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, get_torch_version, is_peft_available, is_torch_fx_available, ) if is_peft_available(): from peft import PeftModel logger = logging.get_logger(__name__) _IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES def _generate_supported_model_class_names( model_name: Type[PretrainedConfig], supported_tasks: Optional[Union[str, List[str]]] = None, ) -> List[str]: task_mapping = { "default": MODEL_MAPPING_NAMES, "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES, "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, "ctc": MODEL_FOR_CTC_MAPPING_NAMES, "audio-classification": 
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
        "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
        "backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES,
    }

    if supported_tasks is None:
        supported_tasks = task_mapping.keys()
    if isinstance(supported_tasks, str):
        supported_tasks = [supported_tasks]

    model_class_names = []
    for task in supported_tasks:
        class_name = task_mapping[task].get(model_name, None)
        if class_name:
            model_class_names.append(class_name)

    return model_class_names


_REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [
    "altclip",
    "albert",
    "bart",
    "bert",
    "blenderbot",
    "blenderbot-small",
    "bloom",
    "clip",
    "convnext",
    "deberta",
    "deberta-v2",
    "dinov2",
    "distilbert",
    "donut-swin",
    "electra",
    "gpt2",
    "gpt_neo",
    "gptj",
    "hubert",
    "layoutlm",
    "llama",
    "lxmert",
    "m2m_100",
    "marian",
    "mbart",
    "megatron-bert",
    "mobilebert",
    "mt5",
    "nezha",
    "opt",
    "pegasus",
    "plbart",
    "resnet",
    "roberta",
    "segformer",
    "speech_to_text",
    "speech_to_text_2",
    "swin",
    "t5",
    "trocr",
    "vit",
    "xglm",
    "wav2vec2",
    # "xlnet",
]

_FX_SUPPORTED_MODELS_WITH_KV_CACHE = ["llama", "opt"]

_REGULAR_SUPPORTED_MODELS = []
for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS:
    if isinstance(item, dict):
        _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item))
    else:
        _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item))

_SPECIAL_SUPPORTED_MODELS = [
    "CLIPTextModel",
    "CLIPTextModelWithProjection",
    "CLIPVisionModel",
    "CLIPVisionModelWithProjection",
    "AltCLIPTextModel",
    "AltCLIPVisionModel",
    "GitVisionModel",
    "GPT2DoubleHeadsModel",
    "Speech2Text2Decoder",
    "TrOCRDecoder",
    "PeftModelForCausalLM",
    "PeftModelForSeq2SeqLM",
    # TODO: add support for them as it should be quite easy to do so (small blocking issues).
    # XLNetForQuestionAnswering,
]
_SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS)))


def torch_nn_embedding(self, input):
    return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype)


def torch_nn_functional_embedding(
    input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False
):
    return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype)


def torch_nn_layernorm(self, input):
    return input


def torch_nn_groupnorm(self, input):
    return input


def torch_nn_linear(self, input):
    return torch.empty(input.shape[:-1] + (self.out_features,), device="meta")


def torch_relu(x):
    return x


def torch_nn_relu(self, x):
    return x


def torch_nn_functional_relu(x, inplace=False):
    if inplace:
        raise ValueError("Don't support in-place functional.relu for MetaTensor analysis")
    return x


def torch_where(condition, x, y):
    # torch.where returns the broadcasted tensor of condition, x, and y,
    # so hack it by using addition
    return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta")


def torch_abs(input, *, out=None):
    if out is not None:
        raise ValueError("Don't support in-place abs for MetaTensor analysis")
    return input


def torch_arange(*args, **kwargs):
    n = len(args)
    step = 1
    if n == 1:
        start = 0
        end = args[0]
    elif n == 2:
        start, end = args
    else:
        start, end, step = args
    if isinstance(start, float):
        start = int(start)
    if isinstance(end, float):
        end = int(end)
    if isinstance(step, float):
        step = int(step)
    step = kwargs.get("step", step)
    dtype = kwargs.get("dtype")
    return torch.empty((end - start) // step, dtype=dtype, device="meta")


def torch_full(*args, **kwargs):
    args = list(args)
    if isinstance(args[1], torch.Tensor) and args[1].device == torch.device("meta"):
        args[1] = 1  # Any value.
    kwargs_without_device = dict(kwargs)
    kwargs_without_device.pop("device", None)
    return torch.full(*args, **kwargs_without_device)


def torch_cat(tensors, dim=None, axis=None, *, out=None):
    if dim is None and axis is None:
        dim = 0
    if dim is None and axis is not None:
        dim = axis
    if dim < 0:
        dim = tensors[0].dim() + dim
    shapes = [t.shape for t in tensors]
    shape = list(shapes[0])
    concatenated_dim = sum(shape[dim] for shape in shapes)
    final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :]
    return torch.empty(final_shape, device="meta")


def torch_stack(tensors, dim=None, axis=None, *, out=None):
    if dim is None and axis is None:
        dim = 0
    if dim is None and axis is not None:
        dim = axis
    if dim < 0:
        dim = tensors[0].dim() + 1 + dim
    shape = list(tensors[0].shape)
    shape.insert(dim, len(tensors))
    return torch.empty(shape, device="meta")


def torch_add(input, other, *, alpha=1, out=None):
    if not isinstance(input, torch.Tensor):
        return torch.empty_like(other, device="meta")
    if not isinstance(other, torch.Tensor):
        return torch.empty_like(input, device="meta")
    max_length = max(input.dim(), other.dim())
    input_shape = list(input.shape) + [1] * (max_length - input.dim())
    other_shape = list(other.shape) + [1] * (max_length - other.dim())
    shape = []
    for i in range(max_length):
        shape.append(max(input_shape[i], other_shape[i]))
    return torch.empty(shape, device="meta")


def torch_mul(input, other, *, out=None):
    return torch_add(input, other, out=out)


def torch_tensor_mul(self, other):
    return torch_mul(self, other)


def torch_matmul(input, other, *, out=None):
    d1 = input.dim()
    d2 = other.dim()
    shape = None
    if d1 == 1 and d2 == 1:
        shape = None
    elif d1 == 2 and d2 == 2:
        shape = (input.size(0), other.size(1))
    elif d1 == 1 and d2 == 2:
        shape = (other.size(1),)
    elif d1 == 2 and d2 == 1:
        shape = (input.size(0),)
    else:
        max_length = max(input.dim(), other.dim())
        shape1 = list(input.shape)
        shape2 = list(other.shape)
        if d1 == 1:
            shape1 = [1] + shape1
        if d2 == 1:
            shape2.append(1)
        shape1 = [-1] * (max_length - d1) + list(input.shape)
        shape2 = [-1] * (max_length - d2) + list(other.shape)
        shape = []
        for i in range(max_length):
            shape.append(max(shape1[i], shape2[i]))
        shape[-2] = shape1[-2]
        shape[-1] = shape2[-1]
        if d1 == 1:
            shape.pop(-2)
        if d2 == 1:
            shape.pop(-1)
    if shape is None:
        return torch.tensor(0.0, device="meta")
    return torch.empty(*shape, device="meta")


def torch_bmm(input, mat2, *, out=None):
    if out is not None:
        raise ValueError("Don't support in-place bmm for MetaTensor analysis")
    batch_size, n, m = input.shape
    _, _, p = mat2.shape
    return torch.empty(batch_size, n, p, device="meta")


def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
    if out is not None:
        raise ValueError("Don't support in-place baddbmm for MetaTensor analysis")
    return torch_bmm(batch1, batch2)


def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):
    return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)


def torch_einsum(equation, *operands):
    # TODO: infer shape without performing the computation, this might be quite hard.
    concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands)
    return torch.einsum(equation, *concrete_operands).to("meta")


def torch_tensor_repeat(self, *sizes):
    shape = list(self.shape)
    for i, x in enumerate(sizes):
        shape[i] *= x
    return torch.empty(shape, device="meta")


def torch_repeat_interleave(*args, dim=None, output_size=None):
    num_args = len(args)
    if num_args == 1:
        shape = [output_size if output_size is not None else args[0].sum()]
    else:
        shape = list(args[0].shape)
        if dim is None:
            if num_args > 2:
                dim = args[2]
            else:
                shape = [sum(shape)]
                dim = 0
        repeats = args[1]
        if isinstance(repeats, int) or torch.numel(repeats) == 1:
            shape[dim] *= int(repeats)
        else:
            shape[dim] = output_size if output_size is not None else repeats.sum()
    return torch.empty(*shape, device="meta")


def torch_index_select(input, dim, index, *, out=None):
    shape = list(input.shape)
    shape[dim] = len(index)
    return torch.empty(*shape, device="meta")


def torch_tensor_index_select(self, dim, index):
    return torch_index_select(self, dim, index)


def torch_gather(input, dim, index, *, sparse_grad=False, out=None):
    shape = list(input.shape)
    shape[dim] = index.shape[dim]
    return torch.empty(*shape, device="meta")


def torch_tensor_gather(self, dim, index):
    return torch_gather(self, dim, index)


def torch_roll(input, shifts, dims=None):
    return input


def torch_flip(input, dims):
    return input


def torch_tensor_flip(self, dims):
    return self


def torch_nn_conv1d(self, input):
    l_in = input.shape[-1]
    shape = None
    padding = self.padding
    if padding == "valid":
        padding = (0, 0)
    if padding == "same":
        shape = list(input.shape)
    if shape is None:
        shape = list(input.shape)
        l_out = math.floor(
            (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
        )
        shape[-1] = l_out
    shape[-2] = self.out_channels
    return torch.empty(shape, device="meta")


def torch_nn_conv2d(self, input):
    h_in, w_in = input.shape[-2:]
    shape = None
    padding = self.padding
    if padding == "valid":
        padding = (0, 0)
    if padding == "same":
        shape = list(input.shape)
    if shape is None:
        shape = list(input.shape)
        h_out = math.floor(
            (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
        )
        w_out = math.floor(
            (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1
        )
        shape[-2:] = [h_out, w_out]
    shape[-3] = self.out_channels
    return torch.empty(shape, device="meta")


def torch_squeeze(input, dim=None):
    shape = list(input.shape)
    if dim is not None:
        if dim < 0:
            dim = input.dim() + dim
        if shape[dim] == 1:
            shape.pop(dim)
    else:
        new_shape = []
        for dim_value in shape:
            if dim_value == 1:
                continue
            new_shape.append(dim_value)
        shape = new_shape
    return torch.empty(shape, device="meta")


def torch_tensor_squeeze(self, dim=None):
    return torch_squeeze(self, dim)


def torch_unsqueeze(input, dim):
    shape = list(input.shape)
    if dim < 0:
        dim = input.dim() + 1 + dim
    shape.insert(dim, 1)
    return torch.empty(shape, device="meta")


def torch_tensor_unsqueeze(self, dim):
    return torch_unsqueeze(self, dim)


def torch_unique_consecutive(input, **kwargs):
    output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
    if isinstance(output, torch.Tensor):
        return output.to("meta")
    else:
        return tuple(map(lambda x: x.to("meta"), output))


def torch_nn_functional_one_hot(tensor, num_classes=-1):
    if num_classes < 0:
        raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
    shape = list(tensor.shape) + [num_classes]
    return torch.empty(shape,
device="meta") def torch_nn_functional_scaled_dot_product_attention( query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None ): target_length = query.shape[-2] head_dim = value.shape[-1] return torch.empty((*query.shape[:-2], target_length, head_dim), device="meta") def torch_nn_mseloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def torch_nn_crossentropyloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def torch_nn_bcewithlogitsloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def operator_getitem(a, b): def to_concrete(t): if isinstance(t, torch.Tensor): concrete = torch.ones_like(t, device="cpu") if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]: concrete = concrete.to(torch.int64) return concrete return t if isinstance(a, torch.Tensor): # TODO: infer shape without performing the computation. if isinstance(b, tuple): b = tuple(map(to_concrete, b)) else: b = to_concrete(b) return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta") return operator.getitem(a, b) _MANUAL_META_OVERRIDES: Dict[Callable, Callable] = { torch.nn.Embedding: torch_nn_embedding, torch.nn.functional.embedding: torch_nn_functional_embedding, torch.nn.LayerNorm: torch_nn_layernorm, torch.nn.GroupNorm: torch_nn_groupnorm, torch.nn.Linear: torch_nn_linear, torch.relu: torch_relu, torch.nn.functional.relu: torch_nn_functional_relu, torch.nn.ReLU: torch_nn_relu, torch.where: torch_where, torch.abs: torch_abs, torch.arange: torch_arange, torch.full: torch_full, torch.cat: torch_cat, torch.stack: torch_stack, torch.add: torch_add, torch.mul: torch_mul, torch.Tensor.mul: torch_tensor_mul, torch.matmul: torch_matmul, torch.bmm: torch_bmm, torch.baddbmm: torch_baddbmm, torch.Tensor.baddbmm: torch_tensor_baddbmm, torch.einsum: torch_einsum, torch.Tensor.repeat: torch_tensor_repeat, torch.repeat_interleave: torch_repeat_interleave, torch.roll: torch_roll, torch.flip: torch_flip, torch.Tensor.flip: torch_tensor_flip, torch.index_select: torch_index_select, torch.Tensor.index_select: torch_tensor_index_select, torch.gather: torch_gather, torch.Tensor.gather: torch_tensor_gather, torch.nn.Conv1d: torch_nn_conv1d, torch.nn.Conv2d: torch_nn_conv2d, torch.squeeze: torch_squeeze, torch.Tensor.squeeze: torch_tensor_squeeze, torch.unsqueeze: torch_unsqueeze, torch.Tensor.unsqueeze: torch_tensor_unsqueeze, torch.unique_consecutive: torch_unique_consecutive, torch.nn.functional.one_hot: torch_nn_functional_one_hot, torch.nn.MSELoss: torch_nn_mseloss, torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss, torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss, operator.getitem: operator_getitem, } if is_torch_greater_or_equal_than_2_0: _MANUAL_META_OVERRIDES[ torch.nn.functional.scaled_dot_product_attention ] = torch_nn_functional_scaled_dot_product_attention class HFProxy(Proxy): """ Proxy that uses metadata to handle data-dependent control-flow. """ def install_metadata(self, metadata): self._metadata = metadata @property def shape(self): return self.tracer.create_proxy("call_method", "size", (self,), {}) @property def device(self): # Hack so we can track when devices are used. 
During meta-tensor propagation, # replace these values with a constant 'meta' return MetaDeviceAttribute(self, "device") def __len__(self): if hasattr(self, "_metadata") and self._metadata is not None: return len(self._metadata) return super().__len__() def __bool__(self): if hasattr(self, "_metadata") and self._metadata is not None: return self._metadata return super().__bool__() def __getattr__(self, k): if k == "_metadata": return self.__getattribute__(k) # note: not added to the graph yet, if this is a method call # we peephole optimize to the method invocation return HFAttribute(self, k) def __setitem__(self, indices, values): return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {}) def __contains__(self, key): if hasattr(self, "_metadata") and self._metadata is not None: return key in self._metadata return super().__contains__(key) class HFAttribute(HFProxy): def __init__(self, root, attr: str): self.root = root self.attr = attr self.tracer = root.tracer self._node = None if hasattr(self.root, "_metadata"): self.install_metadata(getattr(self.root._metadata, attr)) @property def node(self): # the node for attributes is added lazily, since most will just be method calls # which do not rely on the getitem call if self._node is None: self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node return self._node def __call__(self, *args, **kwargs): return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs) class MetaDeviceAttribute(HFAttribute): pass def _proxies_to_metas(v): """Returns the underlying metadata for HFProxies, and behaves like the identity for the others.""" if isinstance(v, MetaDeviceAttribute): return "meta" if isinstance(v, torch.fx.Proxy): if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")): raise RuntimeError(f"No metadata was found for {v}") return v._metadata return v def _gen_constructor_wrapper(target): @functools.wraps(target) def wrapper(*args, **kwargs): proxy = None def check_has_proxy(v): if isinstance(v, Proxy): nonlocal proxy proxy = v torch.fx.node.map_aggregate(args, check_has_proxy) torch.fx.node.map_aggregate(kwargs, check_has_proxy) if proxy is not None: return proxy.tracer.create_proxy("call_function", target, args, kwargs) else: return target(*args, **kwargs) return wrapper, target def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None): if forbidden_values is None: forbidden_values = [] value = random.randint(low, high) while value in forbidden_values: value = random.randint(low, high) return value class HFTracer(Tracer): """ Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the regular PyTorch torch.fx.Proxy. """ # Feature flag for proxying accesses to buffer values proxy_buffer_attributes: bool = True allow_insert_stateless_mods: bool = True _TORCH_METHODS_TO_PATCH = [ "arange", "zeros", "ones", "full", "full_like", "eye", "empty", "tensor", "clamp", "finfo", ] supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) def __init__(self, autowrap_modules=(math,), autowrap_functions=()): super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions) if not is_torch_fx_available(): raise ImportError( f"Found an incompatible version of torch. Found version {get_torch_version()}, but only version " f"{TORCH_FX_REQUIRED_VERSION} is supported." 
            )

    def _generate_dummy_input(
        self, model: PreTrainedModel, input_name: str, shape: List[int], input_names: List[str]
    ) -> Dict[str, torch.Tensor]:
        """Generates dummy input for model inference recording."""
        # Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored
        # from pickle, or from the "__class__" attribute in the general case.
        model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__
        device = model.device
        inputs_dict = {}

        # When tracing a model with KV cache, we simply need to ensure that the KV cache length is larger than one to
        # rightfully pass certain control flows (example: https://github.com/huggingface/transformers/blob/5c8d941d66734811d2ef6f57f15b44f7fb7a98c4/src/transformers/modeling_attn_mask_utils.py#L162).
        # After tracing, the model can then still be used with arbitrary lengths different than the one used during
        # tracing.
        kv_cache_length = 5

        if input_name in ["labels", "start_positions", "end_positions"]:
            batch_size = shape[0]
            if model_class_name in [
                *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES),
                *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES),
                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
                *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES),
            ]:
                inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device)
            elif model_class_name in [
                *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
                *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
                "XLNetForQuestionAnswering",
            ]:
                inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
                inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
            elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
                if not hasattr(model.config, "problem_type") or model.config.problem_type is None:
                    raise ValueError(
                        "Could not retrieve the problem type for the sequence classification task, please set "
                        'model.config.problem_type to one of the following values: "regression", '
                        '"single_label_classification", or "multi_label_classification".'
                    )

                if model.config.problem_type == "regression":
                    labels_shape = (batch_size, model.config.num_labels)
                    labels_dtype = torch.float32
                elif model.config.problem_type == "single_label_classification":
                    labels_shape = (batch_size,)
                    labels_dtype = torch.long
                elif model.config.problem_type == "multi_label_classification":
                    labels_shape = (batch_size, model.config.num_labels)
                    labels_dtype = torch.float32
                else:
                    raise ValueError(
                        'Expected model.config.problem_type to be either: "regression", "single_label_classification"'
                        f', or "multi_label_classification", but "{model.config.problem_type}" was provided.'
) inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) elif model_class_name in [ *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), "GPT2DoubleHeadsModel", "PeftModelForCausalLM", "PeftModelForSeq2SeqLM", ]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device) else: raise NotImplementedError( f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." ) elif "pixel_values" in input_name: batch_size = shape[0] image_size = getattr(model.config, "image_size", None) if image_size is None: if hasattr(model.config, "vision_config"): image_size = model.config.vision_config.image_size elif hasattr(model.config, "encoder"): image_size = model.config.encoder.image_size else: image_size = (_generate_random_int(), _generate_random_int()) # If no num_channels is in the config, use some arbitrary value. num_channels = getattr(model.config, "num_channels", 3) if not isinstance(image_size, collections.abc.Iterable): image_size = (image_size, image_size) height, width = image_size inputs_dict[input_name] = torch.zeros( batch_size, num_channels, height, width, dtype=torch.float32, device=device ) elif "bbox" in input_name: inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device) elif "input_features" in input_name: inputs_dict[input_name] = torch.zeros( *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device ) elif "visual_feats" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_feat_dim, ], dtype=torch.float, device=device, ) elif "visual_pos" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_pos_dim, ], dtype=torch.float, device=device, ) elif "inputs" in input_name: inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device) elif "input_values" in input_name: batch_size, _ = shape # Generating big sequence length for audio inputs. seq_length = _generate_random_int(low=10000, high=20000) inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device) elif "mask" in input_name: if "past_key_values" in input_names: mask_shape = [shape[0], shape[1] + kv_cache_length] else: mask_shape = shape inputs_dict[input_name] = torch.zeros(mask_shape, dtype=torch.long, device=device) elif "ids" in input_name: inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device) elif "past_key_values" in input_name: if model.config.model_type not in _FX_SUPPORTED_MODELS_WITH_KV_CACHE: raise NotImplementedError( f"Symbolic trace with past_key_values input is not supported yet for the model {model.config.model_type}. Please open an issue or a PR in Transformers repository if you would like to see the support added." 
) num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads cache_shape = (shape[0], num_heads, kv_cache_length, head_dim) pkv = tuple( ( torch.rand(cache_shape, dtype=torch.float, device=device), torch.rand(cache_shape, dtype=torch.float, device=device), ) for i in range(model.config.num_hidden_layers) ) inputs_dict[input_name] = pkv else: shape_with_hidden_size = shape + [model.config.hidden_size] inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device) return inputs_dict def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) if kind == "placeholder" and target in self.meta_args: rv.install_metadata(self.meta_args[target]) return rv if target in self.orig_fns: # NOTE: tensor constructors in PyTorch define the `device` argument as # *kwargs-only*. That is why this works. If you add methods to # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only, # this will break and you will likely see issues where we cannot infer # the size of the output. if "device" in kwargs: kwargs["device"] = "meta" try: args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas) kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas) if kind == "call_function": meta_target = _MANUAL_META_OVERRIDES.get(target, target) meta_out = meta_target(*args_metas, **kwargs_metas) if isinstance(meta_out, torch.Tensor): meta_out = meta_out.to(device="meta") elif kind == "call_method": method = getattr(args_metas[0].__class__, target) meta_target = _MANUAL_META_OVERRIDES.get(method, method) meta_out = meta_target(*args_metas, **kwargs_metas) elif kind == "call_module": if not hasattr(self, "orig_forward"): raise AttributeError(f"{self} does not have an attribute called orig_forward") self._disable_module_getattr = True try: mod = self.root.get_submodule(target) mod_type = type(mod) if mod_type in _MANUAL_META_OVERRIDES: meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas) else: meta_out = self.orig_forward(*args_metas, **kwargs_metas) finally: self._disable_module_getattr = False elif kind == "get_attr": self._disable_module_getattr = True try: attr_itr = self.root atoms = target.split(".") for atom in atoms: attr_itr = getattr(attr_itr, atom) if isinstance(attr_itr, torch.Tensor): meta_out = attr_itr.to(device="meta") else: meta_out = attr_itr finally: self._disable_module_getattr = False else: return rv if not isinstance(rv, Proxy): raise ValueError("Don't support composite output yet") rv.install_metadata(meta_out) except Exception as e: if _IS_IN_DEBUG_MODE: warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") return rv # Replaced by .getattr from PyTorch 1.13 def _module_getattr(self, attr, attr_val, parameter_proxy_cache): if getattr(self, "_disable_module_getattr", False): return attr_val else: def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): for n, p in collection_to_search: if attr_val is p: if n not in parameter_proxy_cache: kwargs = {} if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: kwargs["proxy_factory_fn"] = ( None if not self.param_shapes_constant else lambda node: ParameterProxy(self, node, n, attr_val) ) val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] parameter_proxy_cache[n] = val_proxy return 
parameter_proxy_cache[n]
                return None

            if isinstance(attr_val, torch.nn.Parameter):
                maybe_parameter_proxy = maybe_get_proxy_for_attr(
                    attr_val, self.root.named_parameters(), parameter_proxy_cache
                )
                if maybe_parameter_proxy is not None:
                    return maybe_parameter_proxy

            if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
                maybe_buffer_proxy = maybe_get_proxy_for_attr(
                    attr_val, self.root.named_buffers(), parameter_proxy_cache
                )
                if maybe_buffer_proxy is not None:
                    return maybe_buffer_proxy

            return attr_val

    # Needed for PyTorch 1.13+
    def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]):
        return self._module_getattr(attr, attr_val, parameter_proxy_cache)

    def call_module(self, m, forward, args, kwargs):
        self.orig_forward = forward
        return super().call_module(m, forward, args, kwargs)

    def proxy(self, node):
        return HFProxy(node, self)

    def trace(
        self,
        root: Union[torch.nn.Module, Callable[..., Any]],
        concrete_args: Optional[Dict[str, Any]] = None,
        dummy_inputs: Optional[Dict[str, Any]] = None,
        complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
    ) -> Graph:
        """
        Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
        `torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
        the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
        `torch.nn.Module` instance to use as the root and add embedded constants to.

        Args:
            root (`torch.nn.Module` or `Callable`):
                Either a `torch.nn.Module` or a function to be traced through. If root is not a
                [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
            concrete_args (`Dict[str, Any]`, *optional*):
                Concrete arguments that should not be treated as Proxies.
            dummy_inputs (`Dict[str, Any]`, *optional*):
                The dummy inputs needed to handle data-dependent control-flow if `root` is not a
                [`~transformers.PreTrainedModel`]. It can also be used when `root` is a
                [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
            complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
                If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
                `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.

        Returns:
            `torch.fx.Graph`:
                A FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
        """
        sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)

        if concrete_args is None:
            concrete_args = {}

        if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
            for param in sig.parameters.values():
                if param.name in dummy_inputs:
                    continue
                if param.default is inspect.Parameter.empty:
                    raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
            concrete_args.update(
                {
                    p.name: p.default
                    for p in sig.parameters.values()
                    if (p.name not in dummy_inputs and p.name not in concrete_args)
                }
            )

        input_names = sig.parameters.keys() - concrete_args.keys()

        # Creating a random input shape to generate dummy inputs.
        batch_size = _generate_random_int()
        sequence_length = _generate_random_int()
        shape = [batch_size, sequence_length]

        if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
            num_choices = _generate_random_int(low=2, high=5)
            shape.insert(1, num_choices)

        inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
        for input_name in input_names:
            if input_name in inputs:
                continue
            # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
            # be able to use HFTracer._generate_dummy_input.
            if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith(
                ("_deserialize_graph_module", "_CodeOnlyModule")
            ):
                inputs.update(self._generate_dummy_input(root, input_name, shape, input_names=input_names))
            else:
                raise RuntimeError(
                    f"Could not generate input named {input_name} because root is not a"
                    " transformers.PreTrainedModel."
                )

        concrete_metas = {
            input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_
            for input_name, input_ in inputs.items()
        }
        for param in sig.parameters.values():
            if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
                concrete_metas[f"**{param.name}"] = {}
        self.meta_args = concrete_metas
        self.patched_torch_methods = {
            target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
        }
        self.orig_fns = set()

        for name, (wrapper, orig) in self.patched_torch_methods.items():
            setattr(torch, name, wrapper)
            self.orig_fns.add(orig)

        try:
            self.graph = super().trace(root, concrete_args=concrete_args)
        finally:
            for name, (_, orig) in self.patched_torch_methods.items():
                setattr(torch, name, orig)

        # This is necessary because concrete args are added as input to the traced module since
        # https://github.com/pytorch/pytorch/pull/55888.
        for node in self.graph.nodes:
            if node.op == "placeholder":
                # Removing default values for inputs as the forward pass will fail with them.
                if node.target in input_names:
                    node.args = ()
                    # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
                    # It cannot infer on the attributes and methods the input should have, and fails.
                    node.type = torch.Tensor
                # It is a concrete arg so it is not used and should be removed.
                else:
                    to_visit = [node]
                    to_delete = collections.OrderedDict()
                    while to_visit:
                        n = to_visit.pop(0)
                        to_delete[n] = None
                        to_visit += list(n.users.keys())

                    for user in reversed(to_delete.keys()):
                        self.graph.erase_node(user)

            # TODO: solves GraphModule creation.
            # Without this, return type annotation "Tuple" is causing code execution failure.
            if node.op == "output":
                node.type = None

        return self.graph

    def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
        """
        Whether the module was instantiated with Proxies. If that is the case, such module cannot be a leaf module
        because its attributes are input-dependent.
        """
        return any(isinstance(attr, Proxy) for attr in mod.__dict__.values())

    def _insert_module_as_submodule(self, mod: nn.Module) -> str:
        """
        Helper method which tries to insert a module that was not declared as a submodule.
        """
        # If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
        # It is not possible to insert such modules, those should be traced through.
        if self._stateless_mod_instanciation_depends_on_proxies(mod):
            return ""
        idx = 0
        mod_name = mod.__class__.__name__.lower()
        path = f"{mod_name}_{idx}"
        already_inserted = False
        while hasattr(self.root, path):
            if getattr(self.root, path) is mod:
                already_inserted = True
                break
            path = f"{mod_name}_{idx}"
            idx += 1

        # No need to add multiple instances of the same module.
        if not already_inserted:
            self.root.add_module(path, mod)
        return path

    def path_of_module(self, mod: nn.Module) -> str:
        """
        Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root`
        has a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return
        the string "foo.bar".

        Args:
            mod (str): The `Module` to retrieve the qualified name for.
        """
        try:
            return super().path_of_module(mod)
        except NameError as e:
            if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
                path = self._insert_module_as_submodule(mod)
                return path
            raise e

    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
        return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module(
            m, module_qualified_name
        )

    @compatibility(is_backward_compatible=True)
    def keys(self, obj: "Proxy") -> Any:
        """Called when the keys() method is called on a proxy object.

        This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in
        your custom tracer.
        """
        attribute = HFAttribute(obj, "keys")()
        if obj.node.target == "**kwargs":
            return attribute._metadata
        return attribute


def get_concrete_args(model: nn.Module, input_names: List[str]):
    sig = inspect.signature(model.forward)

    if not (set(input_names) <= set(sig.parameters.keys())):
        formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names)
        formatted_allowed_input_names = ", ".join(sig.parameters.keys())
        raise ValueError(
            f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:"
            f" {formatted_allowed_input_names}"
        )

    return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names}


def is_model_supported(model: PreTrainedModel):
    return model.__class__.__name__ in _SUPPORTED_MODELS


def check_if_model_is_supported(model: PreTrainedModel):
    if not is_model_supported(model):
        supported_model_names = ", ".join(_SUPPORTED_MODELS)
        raise NotImplementedError(
            f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}"
        )


def symbolic_trace(
    model: PreTrainedModel,
    input_names: Optional[List[str]] = None,
    disable_check: bool = False,
    tracer_cls: Type[HFTracer] = HFTracer,
) -> GraphModule:
    """
    Performs symbolic tracing on the model.

    Args:
        model ([`PretrainedModel`]):
            The model to trace.
        input_names (`List[str]`, *optional*):
            The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead.
        disable_check (`bool`, *optional*, defaults to `False`):
            If `True`, no check is done before trying to trace the model, this is mostly useful for debugging
            purposes.
        tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`):
            The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead.

    Returns:
        `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) ``` """ if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) concrete_args = get_concrete_args(model, input_names) if not disable_check: check_if_model_is_supported(model) # Tracing. tracer = tracer_cls() traced_graph = tracer.trace(model, concrete_args=concrete_args) traced = torch.fx.GraphModule(model, traced_graph) traced.config = model.config # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus # _generate_dummy_input, where the model class is needed. traced.class_for_deserialization = model.__class__ traced.device = model.device return traced
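# --- Usage sketch (illustrative, not part of the module above) ---
# Tracing a supported model end to end; the checkpoint name is only an example
# and the first run downloads weights. The traced GraphModule is called
# exactly like the original model.
if __name__ == "__main__":
    from transformers import BertModel, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertModel.from_pretrained("bert-base-uncased")
    traced = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])

    inputs = tokenizer("Hello world", return_tensors="pt")
    out = traced(**inputs)  # same outputs as model(**inputs), now via torch.fx
    print(traced.graph)  # inspect the operations recorded during tracing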
transformers/src/transformers/utils/fx.py
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# How to add a new example script in 🤗 Transformers

This folder provides a template for adding a new example script implementing a training or inference task with the
models in the 🤗 Transformers library. To use it, you will need to install cookiecutter:

```
pip install cookiecutter
```

or refer to the installation page of the [cookiecutter documentation](https://cookiecutter.readthedocs.io/).

You can then run the following command inside the `examples` folder of the transformers repo:

```
cookiecutter ../templates/adding_a_new_example_script/
```

and answer the questions asked, which will generate a new folder where you will find a pre-filled template for your
example, following the best practices we recommend.

Adjust the way the data is preprocessed, the model is loaded, or the `Trainer` is instantiated. Then, when you're
happy, add a `README.md` in the folder (or complete the existing one if you added a script to an existing folder)
telling a user how to run your script.

Make a PR to the 🤗 Transformers repo. Don't forget to tweet about your new example with a carbon screenshot of how to
run it and tag @huggingface!
transformers/templates/adding_a_new_example_script/README.md
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for {{cookiecutter.modelname}}.""" {%- if cookiecutter.tokenizer_type == "Based on BERT" %} from ...utils import logging from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 512, } PRETRAINED_INIT_CONFIGURATION = { "{{cookiecutter.checkpoint_identifier}}": {"do_lower_case": False}, } class {{cookiecutter.camelcase_modelname}}Tokenizer(BertTokenizer): r""" Construct a {{cookiecutter.modelname}} tokenizer. [`~{{cookiecutter.camelcase_modelname}}Tokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION {%- elif cookiecutter.tokenizer_type == "Based on BART" %} from ...utils import logging from ..bart.tokenization_bart import BartTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.json", }, "merges_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}Tokenizer(BartTokenizer): """ Construct a {{cookiecutter.modelname}} tokenizer. [`~{{cookiecutter.camelcase_modelname}}Tokenizer`] is identical to [`BartTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BartTokenizer`] for usage examples and documentation concerning parameters. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES {%- elif cookiecutter.tokenizer_type == "Standalone" %} from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}Tokenizer(PreTrainedTokenizer): """ Construct a {{cookiecutter.modelname}} tokenizer. Based on byte-level Byte-Pair-Encoding. Args: vocab_file (`str`): Path to the vocabulary file. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) """ Initialisation """ @property def vocab_size(self): """ Returns vocab size """ def get_vocab(self): """ Returns vocab as a dict """ def _tokenize(self, text): """ Returns a tokenized string. """ def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ def save_vocabulary(self, save_directory): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved. """ def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A {{cookiecutter.modelname}} sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) class {{cookiecutter.camelcase_modelname}}TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). Args: vocab_file (`str`): Path to the vocabulary file. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs ): super().__init__( ByteLevelBPETokenizer( vocab_file=vocab_file, merges_file=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] {% endif %}
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_{{cookiecutter.lowercase_modelname}}.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class TFBenchmarkTest(unittest.TestCase): def check_results_dict_not_empty(self, results): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]): result = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(result) def test_inference_no_configs_eager(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_only_pretrain(self): MODEL_ID = "sgugger/tiny-distilbert-classification" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_graph(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_with_configs_eager(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_with_configs_graph(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) 
self.check_results_dict_not_empty(results.memory_inference_result) def test_train_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_train_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_inference_encoder_decoder_with_configs(self): MODEL_ID = "patrickvonplaten/t5-tiny-random" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.") def test_inference_no_configs_xla(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_save_csv_files(self): MODEL_ID = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) benchmark.run() self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists()) def test_trace_memory(self): MODEL_ID = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(summary): self.assertTrue(hasattr(summary, "sequential")) self.assertTrue(hasattr(summary, "cumulative")) self.assertTrue(hasattr(summary, "current")) self.assertTrue(hasattr(summary, "total")) with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) result = benchmark.run() _check_summary_is_not_empty(result.inference_summary) self.assertTrue(Path(os.path.join(tmp_dir, 
"log.txt")).exists())
transformers/tests/benchmark/test_benchmark_tf.py
# coding=utf-8 # Copyright 2021 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, Wav2Vec2Config, Wav2Vec2FeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json") SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json") class AutoFeatureExtractorTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_feature_extractor_from_model_shortcut(self): config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_feature_extractor_from_local_directory_from_key(self): config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_feature_extractor_from_local_directory_from_config(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = Wav2Vec2Config() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict() config_dict.pop("feature_extractor_type") config = Wav2Vec2FeatureExtractor(**config_dict) # save in new folder model_config.save_pretrained(tmpdirname) config.save_pretrained(tmpdirname) config = AutoFeatureExtractor.from_pretrained(tmpdirname) # make sure private variable is not incorrectly saved dict_as_saved = json.loads(config.to_json_string()) self.assertTrue("_processor_class" not in dict_as_saved) self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_feature_extractor_from_local_file(self): config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG) self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = AutoFeatureExtractor.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_feature_extractor_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ): 
_ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model") def test_from_pretrained_dynamic_feature_extractor(self): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(ValueError): feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(ValueError): feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False ) feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(tmp_dir) reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor") def test_new_feature_extractor_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(ValueError): AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor) # Now that the config is registered, it can be used as any other config with the auto-API feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(tmp_dir) new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir) self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_feature_extractor_conflict(self): class NewFeatureExtractor(Wav2Vec2FeatureExtractor): is_local = True try: AutoConfig.register("custom", CustomConfig) AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor) # If remote code is not set, the default is to use local feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") self.assertTrue(feature_extractor.is_local) # If remote code is disabled, we load the local one. feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") self.assertTrue(feature_extractor.is_local) # If remote is enabled, we load from the Hub feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") self.assertTrue(not hasattr(feature_extractor, "is_local")) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
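The last two tests exercise the registration API; condensed into a standalone sketch, the round-trip looks like the following. MyConfig and MyFeatureExtractor are hypothetical stand-ins for the CustomConfig/CustomFeatureExtractor test fixtures.

import tempfile

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # registering an already-used model type raises ValueError


class MyFeatureExtractor(Wav2Vec2FeatureExtractor):
    pass


AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)

# Once registered, the custom class round-trips through the auto API.
feature_extractor = MyFeatureExtractor()
with tempfile.TemporaryDirectory() as tmp_dir:
    feature_extractor.save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
assert isinstance(reloaded, MyFeatureExtractor)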
transformers/tests/models/auto/test_feature_extraction_auto.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from __future__ import annotations

import copy
import tempfile
import unittest

import numpy as np

from transformers import BartConfig, BartTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel


@require_tf
class TFBartModelTester:
    config_cls = BartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # Ids are clipped to avoid "beginning of sequence", "end of sequence", and "pad" tokens
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size),
            clip_value_min=self.eos_token_id + 1,
            clip_value_max=self.vocab_size + 1,
        )

        # Explicitly add "end of sequence" to the inputs
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict =
prepare_bart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBartModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask) output_from_no_past = output_from_no_past[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values) output_from_past = output_from_past[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_bart_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBartModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel) if is_tf_available() else () ) all_generative_model_classes = (TFBartForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFBartForConditionalGeneration, "feature-extraction": TFBartModel, "summarization": TFBartForConditionalGeneration, "text-classification": TFBartForSequenceClassification, "text2text-generation": TFBartForConditionalGeneration, "translation": TFBartForConditionalGeneration, "zero-shot": TFBartForSequenceClassification, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = 
True onnx_min_opset = 10 def setUp(self): self.model_tester = TFBartModelTester(self) self.config_tester = ConfigTester(self, config_class=BartConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) # TODO (Joao): fix me @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass # TFBartForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (TFBartForConditionalGeneration, TFBartModel): model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) model(inputs) # TFBartForSequenceClassification does not support inputs_embeds @slow def test_graph_mode_with_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (TFBartForConditionalGeneration, TFBartModel): model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_save_load_after_resize_token_embeddings(self): # Custom version of this test to ensure "end of sequence" tokens are present throughout if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # create a model with resized (expended) embeddings new_tokens_size = 10 old_total_size = config.vocab_size new_total_size = old_total_size + new_tokens_size model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` model.build_in_name_scope() model.resize_token_embeddings(new_total_size) # fetch the output for an input exclusively made of new members of the vocabulary inputs_dict = copy.deepcopy(original_inputs_dict) ids_feat_name = None if "input_ids" in inputs_dict: ids_feat_name = "input_ids" elif "decoder_input_ids" in inputs_dict: ids_feat_name = "decoder_input_ids" else: assert False, "No input ids feature found in the inputs dict" new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size) 
new_vocab_input_ids += old_total_size # Replace last id with EOS token new_vocab_input_ids = new_vocab_input_ids[:, :-1] new_vocab_input_ids = tf.concat( [new_vocab_input_ids, tf.ones((tf.shape(new_vocab_input_ids)[0], 1), dtype=tf.int32) * 2], axis=1 ) inputs_dict[ids_feat_name] = new_vocab_input_ids if "input_ids" in inputs_dict: inputs_dict["input_ids"] = new_vocab_input_ids if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = new_vocab_input_ids prepared_inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs) # save and load the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model = model_class.from_pretrained(tmpdirname) restored_model_outputs = model(**prepared_inputs) # check that the output for the restored model is the same self.assert_outputs_same(restored_model_outputs, outputs) @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tf class TFBartHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2 input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1) batch_size = input_ids.shape[0] config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, decoder_start_token_id=2, ) return config, input_ids, batch_size def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() decoder_lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size) lm_model = TFBartForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids, labels=decoder_lm_labels, decoder_input_ids=input_ids, use_cache=False) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs.logits.shape, expected_shape) def test_lm_uneven_forward(self): config = BartConfig( vocab_size=10, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, ) lm_model = TFBartForConditionalGeneration(config) context = tf.fill((7, 2), 4) summary = tf.fill((7, 7), 6) outputs = lm_model(input_ids=context, decoder_input_ids=summary, use_cache=False) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs.logits.shape, expected_shape) @require_tf class TFBartForSequenceClassificationTest(unittest.TestCase): def test_model_fails_for_uneven_eos_tokens(self): config = BartConfig(eos_token_id=2) model = TFBartForSequenceClassification(config) inputs = { "input_ids": tf.constant([[1, 2, 2, 2], [1, 3, 2, 2], [2, 2, 3, 3]]), "attention_mask": tf.constant([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]), } with self.assertRaises(tf.errors.InvalidArgumentError): model(inputs) @slow @require_tf class TFBartModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large").model input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.cast(tf.math.not_equal(input_ids, model.config.pad_token_id), tf.int8) output = model(input_ids=input_ids, 
attention_mask=attention_mask)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) expected_slice = tf.convert_to_tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3) def test_cnn_summarization_same_as_fairseq_hard(self): hf = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") tok = self.tok FRANCE_ARTICLE = ( # @noqa " Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. 
Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. 
"Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) EXPECTED_SUMMARY_FRANCE = ( "French prosecutor says he's not aware of any video footage from on board the plane. German daily Bild" " and French Paris Match claim to have found a cell phone video of the crash. A French Gendarmerie" ' spokesman calls the reports "completely wrong" and "unwarranted" German airline Lufthansa confirms' " co-pilot Andreas Lubitz had battled depression." ) SHORTER_ARTICLE = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." 
In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) EXPECTED_SUMMARY_SHORTER = ( "The Palestinian Authority becomes the 123rd member of the International Criminal Court. The move gives" " the court jurisdiction over alleged crimes in Palestinian territories. Israel and the United States" " opposed the Palestinians' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki said" " it was a move toward greater justice." ) # The below article tests that we don't add any hypotheses outside of the top n_beams IRAN_ARTICLE = ( " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. 
It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. 
Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) EXPECTED_SUMMARY_IRAN = ( "The U.S. and its negotiating partners reached a very strong framework agreement with Iran. Peter Bergen:" " The debate that has already begun will likely result in more heat than light. He says the agreement" " limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon." " Bergen says the most important aim of a nuclear deal is preventing a nuclear Iran." ) ARTICLE_SUBWAY = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) EXPECTED_SUMMARY_SUBWAY = ( "Liana Barrientos has been married 10 times, sometimes within two weeks of each other. Prosecutors say the" " marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in" " the Bronx. She was arrested and charged with theft of service and criminal trespass for allegedly" " sneaking into the subway." 
) dct = tok( [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY], max_length=1024, truncation_strategy="only_first", padding="longest", truncation=True, return_tensors="tf", ) self.assertEqual(1024, dct["input_ids"].shape[1]) hypotheses_batch = hf.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], ) assert hypotheses_batch[:, 1].numpy().tolist() == [0, 0, 0, 0] # test force_bos_token_to_be_generated decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) expected_batch = [ EXPECTED_SUMMARY_FRANCE, EXPECTED_SUMMARY_SHORTER, EXPECTED_SUMMARY_IRAN, EXPECTED_SUMMARY_SUBWAY, ] assert decoded == expected_batch @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @slow def test_contrastive_search_bart(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." 
) bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") input_ids = bart_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="tf" ).input_ids outputs = bart_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos, 39, pleaded not guilty to charges related to false marriage statements. " "Prosecutors say she married at least 10 times, sometimes within two weeks of each other. She is " "accused of being part of an immigration scam to get permanent residency. If convicted, she faces up " "to four years in" ], ) @slow def test_contrastive_search_bart_xla(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." 
) bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") input_ids = bart_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="tf" ).input_ids xla_generate = tf.function(bart_model.generate, jit_compile=True) # no_repeat_ngram_size set to 0 because it isn't compatible with XLA, but doesn't change the original output outputs = xla_generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64, no_repeat_ngram_size=0) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos, 39, pleaded not guilty to charges related to false marriage statements. " "Prosecutors say she married at least 10 times, sometimes within two weeks of each other. She is " "accused of being part of an immigration scam to get permanent residency. If convicted, she faces up " "to four years in" ], ) @slow @require_tf class FasterTFBartModelIntegrationTests(unittest.TestCase): """These tests are useful for debugging since they operate on a model with 1 encoder layer and 1 decoder layer.""" @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @cached_property def xsum_1_1_model(self): return TFBartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1") def test_xsum_1_1_generation(self): model = self.xsum_1_1_model assert model.model.decoder.embed_tokens == model.model.shared ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes." ) EXPECTED = ( " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." ) dct = self.tok(ARTICLE, return_tensors="tf") generated_ids = model.generate(**dct, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert result == EXPECTED def test_xsum_1_1_xla_generation(self): # same test as above, but with `no_repeat_ngram_size=0` (not compatible with XLA) and XLA comparison enabled model = self.xsum_1_1_model assert model.model.decoder.embed_tokens == model.model.shared ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. 
"As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes." ) EXPECTED = ( " The International Criminal Court (ICC) has announced that it is to be investigated by the International" " Criminal Court (ICC) over allegations of war crimes." ) dct = self.tok(ARTICLE, return_tensors="tf") generated_ids = model.generate(**dct, num_beams=4, no_repeat_ngram_size=0) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert result == EXPECTED xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = xla_generate(**dct, num_beams=4, no_repeat_ngram_size=0) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert result == EXPECTED def test_xsum_1_1_batch_generation(self): batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." 
" The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. 
The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. \"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. 
The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. 
"But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="tf", padding="longest", truncation=True, ) generated_ids = self.xsum_1_1_model.generate(**batch, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True) assert ( result[0] == " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." ) assert ( result[1] == " An investigation into the crash that killed at least 10 people in the French capital has been" " released by the French police investigating the crash." ) def test_encoder_equiv(self): batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." 
In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. \"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. 
Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. 
Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="tf", padding="longest", truncation=True, ) features = self.xsum_1_1_model.get_encoder()(**batch).last_hidden_state expected = np.array([[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.0840, -0.2763]]) assert np.allclose(features[0, :3, :3].numpy(), expected, atol=1e-3)
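# The XLA tests above all follow the same recipe: wrap `generate` in
# `tf.function(..., jit_compile=True)` and pass `no_repeat_ngram_size=0`, since the
# n-gram ban is not XLA-compatible (and, for these inputs, does not change the output).
# Below is a minimal, standalone sketch of that recipe for reference. It is an
# illustration only, not part of the original test file: the helper name
# `xla_summarize` is hypothetical, and running it assumes network access to download
# the "facebook/bart-large-cnn" checkpoint used by the tests above.
import tensorflow as tf

from transformers import BartTokenizer, TFBartForConditionalGeneration


def xla_summarize(text: str) -> str:
    tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
    model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
    # Pad to a fixed length so the compiled function is not retraced for every new input shape.
    inputs = tokenizer(text, max_length=512, padding="max_length", truncation=True, return_tensors="tf")
    # jit_compile=True compiles the whole generation loop with XLA.
    xla_generate = tf.function(model.generate, jit_compile=True)
    summary_ids = xla_generate(**inputs, num_beams=4, no_repeat_ngram_size=0, max_length=64)
    return tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]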
# [end of transformers/tests/models/bart/test_modeling_tf_bart.py]
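# For comparison with the contrastive-search tests in the BART file above, here is a
# minimal sketch of contrastive-search decoding with the TF generation API. Again an
# illustration under assumptions, not part of either test file: it assumes network
# access for the "facebook/bart-large-cnn" checkpoint, and the input sentence is an
# arbitrary example.
from transformers import BartTokenizer, TFBartForConditionalGeneration

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
input_ids = tokenizer("Liana Barrientos has been married 10 times.", return_tensors="tf").input_ids
# Passing penalty_alpha > 0 together with top_k > 1 switches generate() from
# greedy/beam decoding to contrastive search.
output_ids = model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])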
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow Blip model. """ from __future__ import annotations import inspect import tempfile import unittest import numpy as np import requests from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipTextModel, TFBlipVisionModel, ) from transformers.modeling_tf_utils import keras from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BlipProcessor class TFBlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return BlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = TFBlipVisionModel(config=config) result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = 
(self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFBlipVisionModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Blip does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFBlipVisionModel,) if is_tf_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFBlipVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class TFBlipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout 
self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: input_mask = input_mask.numpy() batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 input_mask = tf.convert_to_tensor(input_mask) config = self.get_config() return config, input_ids, input_mask def get_config(self): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def create_and_check_model(self, config, input_ids, input_mask): model = TFBlipTextModel(config=config) result = model(input_ids, attention_mask=input_mask, training=False) result = model(input_ids, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipTextModel,) if is_tf_available() else () fx_compatible = False test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFBlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) class TFBlipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = 
TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_tf class TFBlipModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFBlipModel,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFBlipModel, "image-to-text": TFBlipForConditionalGeneration} if is_tf_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = TFBlipModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) @unittest.skip("Matt: Re-enable this test when we have a proper export function for TF models.") def 
test_saved_model_creation(self): # This fails because the if return_loss: conditional can return None or a Tensor and TF hates that. # We could fix that by setting the bool to a constant when exporting, but that requires a dedicated export # function that we don't have yet. pass class BlipTextRetrievalModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipTextImageModelsModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "labels": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipVQAModelsModelTester: def 
__init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return BlipConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFBlipModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "labels": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


@require_tf
@require_vision
class TFBlipVQAModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipForQuestionAnswering,) if is_tf_available() else ()
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipVQAModelsModelTester(self)

    def _prepare_inputs_for_vqa(self):
        _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["labels"] = inputs_dict["input_ids"]
        inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"]
        # "return_loss" is not always present in the inputs dict; pop defensively to avoid a KeyError
        inputs_dict.pop("return_loss", None)
        return inputs_dict

    def test_class_name_consistency(self):
        """
        Tests that all VQA models have a class name that ends with "ForQuestionAnswering"
        """
        for model_class in self.all_model_classes:
            model = model_class(self.model_tester.get_config())
            self.assertTrue(
                model.__class__.__name__.endswith("ForQuestionAnswering"),
                f"Class name should end with 'ForQuestionAnswering', got {model.__class__.__name__}",
            )

    def test_training(self):
        """
        Tests that all VQA models can be trained on a single batch
        """
        for model_class in self.all_model_classes:
            model = model_class(self.model_tester.get_config())
            loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1], training=True).loss
            self.assertIsNotNone(loss, "Loss should not be None")

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="BlipModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Tested in individual model tests")
    def test_compile_tf_model(self):
pass @unittest.skip("Model doesn't have a clean loss output.") def test_keras_fit(self): pass @require_tf class TFBlipTextRetrievalModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipForImageTextRetrieval,) if is_tf_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = BlipTextRetrievalModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs, training=True).loss self.assertTrue(loss is not None) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Tested in individual model tests") def test_compile_tf_model(self): pass @unittest.skip("Model doesn't have a clean loss output.") def test_keras_fit(self): pass @require_tf class TFBlipTextImageModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipForConditionalGeneration,) if is_tf_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = BlipTextImageModelsModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = 
model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if all(name in arg_names for name in ("head_mask", "decoder_head_mask", "cross_attn_head_mask")) else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ( ["input_ids"] if model_class != TFBlipForConditionalGeneration else ["pixel_values"] ) self.assertListEqual(arg_names[:1], expected_arg_names) @unittest.skip(reason="Tested in individual model tests") def test_compile_tf_model(self): pass @unittest.skip("Has some odd input names!") def test_keras_fit(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs, training=True).loss self.assertIsNotNone(loss) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_tf @slow class TFBlipModelIntegrationTest(unittest.TestCase): def test_inference_image_captioning(self): model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() # image only inputs = processor(images=image, return_tensors="tf") predictions = model.generate(**inputs) # Test output self.assertEqual( predictions[0].numpy().tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] ) # image and context context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="tf") predictions = model.generate(**inputs) # Test output
self.assertEqual( predictions[0].numpy().tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) def test_inference_vqa(self): model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") image = prepare_img() text = "how many dogs are in the picture?" inputs = processor(image, text=text, return_tensors="tf") out = model.generate(**inputs) # Test output self.assertEqual(out[0].numpy().tolist(), [30522, 1015, 102]) def test_inference_itm(self): model = TFBlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco") processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco") image = prepare_img() text = "A woman and her dog sitting in a beach" inputs = processor(image, text, return_tensors="tf") out_itm = model(**inputs) out = model(**inputs, use_itm_head=False, training=False) expected_scores = tf.convert_to_tensor([[0.0029, 0.9971]]) self.assertTrue(np.allclose(tf.nn.softmax(out_itm[0]).numpy(), expected_scores, rtol=1e-3, atol=1e-3)) self.assertTrue(np.allclose(out[0], tf.convert_to_tensor([[0.5162]]), rtol=1e-3, atol=1e-3))
transformers/tests/models/blip/test_modeling_tf_blip.py/0
{ "file_path": "transformers/tests/models/blip/test_modeling_tf_blip.py", "repo_id": "transformers", "token_count": 14940 }
369
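The retrieval testers in the file above assert that logits_per_image has shape (image_batch, text_batch) and that logits_per_text is its transpose. A minimal numpy sketch of the CLIP/BLIP-style similarity computation behind those assertions (names and the logit_scale value are illustrative, not library code):

import numpy as np

def retrieval_logits(image_embeds, text_embeds, logit_scale=100.0):
    # L2-normalize both embedding sets so the dot product is a cosine similarity
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    logits_per_image = logit_scale * image_embeds @ text_embeds.T  # (num_images, num_texts)
    return logits_per_image, logits_per_image.T  # logits_per_text is the transpose

rng = np.random.default_rng(0)
per_image, per_text = retrieval_logits(rng.normal(size=(12, 64)), rng.normal(size=(7, 64)))
assert per_image.shape == (12, 7) and per_text.shape == (7, 12)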
# coding=utf-8 # Copyright 2020 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByT5Tokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = ByT5Tokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = ByT5Tokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def t5_base_tokenizer(self): return ByT5Tokenizer.from_pretrained("google/byt5-small") def get_tokenizer(self, **kwargs) -> ByT5Tokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_multibytes_char(self): tokenizer = self.t5_base_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "Unicode €.</s>") encoded = tokenizer("e è é ê ë") encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "e è é ê ë</s>") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>") def test_prepare_batch_integration(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 37), batch.input_ids.shape) self.assertEqual((2, 37), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] # fmt: skip expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: skip batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) # cannot use default save_and_load_tokenizer test method because tokenizer has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with 
open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_single_bytes(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) tokenizer = tokenizer_class.from_pretrained(tmp_dir) self.assertTrue(tokenizer.decode([255]) == "") # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list def test_pretrained_model_lists(self): pass # tokenizer does not have vocabulary def test_get_vocab(self): pass # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters def test_pretokenized_inputs(self): pass # tests all ids in vocab => vocab doesn't exist so unnecessary to test def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str) # We need a different implementation of the test of the same name defined in TokenizerTesterMixin 
because this tokenizer # doesn't have a vocab def test_tokenizers_common_ids_setters(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] token_id_to_test_setters = 0 token_to_test_setters = tokenizer.convert_ids_to_tokens( token_id_to_test_setters, skip_special_tokens=False ) for attr in attributes_list: setattr(tokenizer, attr + "_id", None) self.assertEqual(getattr(tokenizer, attr), None) self.assertEqual(getattr(tokenizer, attr + "_id"), None) setattr(tokenizer, attr + "_id", token_id_to_test_setters) self.assertEqual(getattr(tokenizer, attr), token_to_test_setters) self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters) setattr(tokenizer, "additional_special_tokens_ids", []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), []) setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
transformers/tests/models/byt5/test_tokenization_byt5.py/0
{ "file_path": "transformers/tests/models/byt5/test_tokenization_byt5.py", "repo_id": "transformers", "token_count": 7883 }
370
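The hard-coded ids in test_multibytes_char above follow directly from ByT5's byte-level scheme: each UTF-8 byte is shifted by an offset of 3 (reserving 0=&lt;pad&gt;, 1=&lt;/s&gt;, 2=&lt;unk&gt;) and the eos id is appended. A self-contained sketch; the offset value is inferred from the test fixtures themselves:

def byt5_like_encode(text: str, offset: int = 3, eos_id: int = 1) -> list:
    # Each UTF-8 byte maps to byte_value + offset; </s> is appended, matching
    # the eos behaviour exercised in test_eos_treatment.
    return [b + offset for b in text.encode("utf-8")] + [eos_id]

# Reproduces the fixture from test_multibytes_char; "€" is the three bytes 0xE2 0x82 0xAC
assert byt5_like_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]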
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Data2VecVision model. """ import unittest from transformers import Data2VecVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation, Data2VecVisionModel, ) from transformers.models.data2vec.modeling_data2vec_vision import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class Data2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = Data2VecVisionModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (self.image_size // self.patch_size) ** 2 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = Data2VecVisionForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = Data2VecVisionForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (Data2VecVisionModel, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": Data2VecVisionModel, "image-classification": Data2VecVisionForImageClassification, "image-segmentation": Data2VecVisionForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Data2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass @require_torch_multi_gpu @unittest.skip( reason="Data2VecVision has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in [*get_values(MODEL_MAPPING)]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class in [*get_values(MODEL_MAPPING)] or not model_class.supports_gradient_checkpointing: continue # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING # this can then be incorporated into _prepare_for_class in test_modeling_common.py elif model_class.__name__ == "Data2VecVisionForSemanticSegmentation": batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by 
config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Data2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class Data2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([0.3277, -0.1395, 0.0911]).to(torch_device) self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(logits[0].topk(2).indices.cpu().tolist(), expected_top2)
transformers/tests/models/data2vec/test_modeling_data2vec_vision.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_data2vec_vision.py", "repo_id": "transformers", "token_count": 5924 }
371
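The seq_length bookkeeping in Data2VecVisionModelTester above is the standard ViT computation: non-overlapping patches plus one [CLS] token. A small sketch using the tester's defaults (image_size=30, patch_size=2):

def vit_sequence_length(image_size: int, patch_size: int) -> int:
    # (image_size // patch_size) ** 2 non-overlapping patches, +1 for the [CLS] token
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1

assert vit_sequence_length(30, 2) == 15 * 15 + 1 == 226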
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import pytest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class DistilBertModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, 
dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_distilbert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_distilbert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_distilbert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_distilbert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DistilBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_distilbert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DistilBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_distilbert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = DistilBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, 
DistilBertForTokenClassification, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = True def setUp(self): self.model_tester = DistilBertModelTester(self) self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_distilbert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DistilBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. 
@require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference(self): import torch for model_class in self.all_model_classes: dummy_input = torch.LongTensor( [ [1, 2, 3, 4], [1, 2, 8, 9], [1, 2, 11, 12], [1, 2, 13, 14], ] ).to(torch_device) dummy_attention_mask = torch.LongTensor( [ [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], ] ).to(torch_device) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) logits = model(dummy_input, output_hidden_states=True).hidden_states[-1] logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1] self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)) output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) logits_fa = output_fa.hidden_states[-1] output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) logits = output.hidden_states[-1] self.assertTrue(torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)) # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): import torch for model_class in self.all_model_classes: dummy_input = torch.LongTensor( [ [1, 2, 3, 4], [1, 2, 8, 9], [1, 2, 11, 12], [1, 2, 13, 14], ] ).to(torch_device) dummy_attention_mask = torch.LongTensor( [ [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], ] ).to(torch_device) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, ) model.to(torch_device) logits = model(dummy_input, output_hidden_states=True).hidden_states[-1] logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1] self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)) output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) logits_fa = output_fa.hidden_states[-1] output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True) logits = output.hidden_states[-1] self.assertTrue(torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)) @require_torch class DistilBertModelIntergrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = DistilBertModel.from_pretrained("distilbert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 
0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
transformers/tests/models/distilbert/test_modeling_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_distilbert.py", "repo_id": "transformers", "token_count": 7895 }
372
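The two flash-attention tests above compare eager and flash_attention_2 hidden states with loose tolerances (bfloat16 kernels differ numerically) and slice away padded positions, whose values are undefined. A hypothetical helper capturing that pattern (not part of the test file):

import torch

def assert_close_on_attended_tokens(eager_hidden, fa2_hidden, attention_mask, atol=4e-2, rtol=4e-2):
    # Boolean-index with the attention mask so only attended (mask == 1)
    # positions are compared; padded positions may legitimately disagree.
    valid = attention_mask.bool()  # (batch, seq_len)
    assert torch.allclose(eager_hidden[valid], fa2_hidden[valid], atol=atol, rtol=rtol)

hidden = torch.randn(4, 4, 8)
assert_close_on_attended_tokens(hidden, hidden + 1e-3, torch.tensor([[0, 1, 1, 1]] * 4))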
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DPT model. """ import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, neck_hidden_sizes=[16, 32], is_hybrid=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.backbone_out_indices = backbone_out_indices self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.is_hybrid = is_hybrid self.neck_hidden_sizes = neck_hidden_sizes # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, fusion_hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, neck_hidden_sizes=self.neck_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = DPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) 
model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_semantic_segmentation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 
150, 480, 480)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) def test_post_processing_semantic_segmentation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((480, 480)) self.assertEqual(segmentation[0].shape, expected_shape)
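# --- illustrative sketch (not part of the original test suite) ---------------------------------
# `post_process_semantic_segmentation` above upsamples the (1, 150, 480, 480) logits to each
# requested `target_size` and reduces over the class dimension. A minimal sketch of that step,
# assuming plain bilinear interpolation (the DPTImageProcessor internals may differ in detail):
import torch

def sketch_post_process(logits, target_size):
    """Upsample (batch, num_labels, h, w) logits and reduce to an (H, W) label map."""
    resized = torch.nn.functional.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    return resized.argmax(dim=1)[0]

# e.g. sketch_post_process(torch.randn(1, 150, 480, 480), (500, 300)).shape == torch.Size([500, 300])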
transformers/tests/models/dpt/test_modeling_dpt.py/0
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt.py", "repo_id": "transformers", "token_count": 6104 }
373
# coding=utf-8 # Copyright 2021-2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the EnCodec feature extractor.""" import itertools import random import unittest import numpy as np from transformers import EncodecFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch class EnCodecFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=24000, return_attention_mask=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: audio_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size audio_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: audio_inputs = [np.asarray(x) for x in audio_inputs] return audio_inputs @require_torch class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = EncodecFeatureExtractor def setUp(self): self.feat_extract_tester = EnCodecFeatureExtractionTester(self) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs] # Test not batched input encoded_sequences_1 = feat_extract(audio_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_audio_inputs[0], return_tensors="np").input_values 
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(audio_inputs, padding=True, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_audio_inputs, padding=True, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_double_precision_pad(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_audio_inputs = np.random.rand(100).astype(np.float64) py_audio_inputs = np_audio_inputs.tolist() for inputs in [py_audio_inputs, np_audio_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech audio_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in audio_samples] def test_integration(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) # fmt: on input_audio = self._load_datasamples(1) feature_extractor = EncodecFeatureExtractor() input_values = feature_extractor(input_audio, return_tensors="pt").input_values self.assertEqual(input_values.shape, (1, 1, 93680)) self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6)) def test_integration_stereo(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) # fmt: on input_audio = self._load_datasamples(1) input_audio = [np.tile(input_audio[0][None], reps=(2, 1))] input_audio[0][1] *= 0.5 feature_extractor = EncodecFeatureExtractor(feature_size=2) input_values = feature_extractor(input_audio, return_tensors="pt").input_values self.assertEqual(input_values.shape, (1, 2, 93680)) self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6)) self.assertTrue(torch.allclose(input_values[0, 1, :30], EXPECTED_INPUT_VALUES * 0.5, atol=1e-6)) def test_truncation_and_padding(self): input_audio = self._load_datasamples(2) # use a chunked extractor (1s chunks, 1% overlap) so both truncation and padding are exercised feature_extractor = EncodecFeatureExtractor(feature_size=1, chunk_length_s=1, overlap=0.01) # setting both padding and truncation must raise an error with self.assertRaisesRegex( ValueError, "^Both padding and truncation were set. Make sure you only set one.$", ): truncated_outputs = feature_extractor( input_audio, padding="max_length", truncation=True, return_tensors="pt" ).input_values # truncate to chunk truncated_outputs = feature_extractor(input_audio, truncation=True, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (2, 1, 71520)) # 2 chunks # force truncate to max_length truncated_outputs = feature_extractor( input_audio, truncation=True, max_length=48000, return_tensors="pt" ).input_values self.assertEqual(truncated_outputs.shape, (2, 1, 48000)) # pad to chunk padded_outputs = feature_extractor(input_audio, padding=True, return_tensors="pt").input_values self.assertEqual(padded_outputs.shape, (2, 1, 95280)) # pad to chunk truncated_outputs = feature_extractor(input_audio, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (2, 1, 95280)) # force pad to max length truncated_outputs = feature_extractor( input_audio, padding="max_length", max_length=100000, return_tensors="pt" ).input_values self.assertEqual(truncated_outputs.shape, (2, 1, 100000)) # force no pad with self.assertRaisesRegex( ValueError, "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$", ): truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (1, 1, 93680)) # no pad if no chunk_length_s feature_extractor.chunk_length_s = None with self.assertRaisesRegex( ValueError, "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$", ): truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (1, 1, 93680)) # no pad if no overlap feature_extractor.chunk_length_s = 2 feature_extractor.overlap = None with self.assertRaisesRegex( ValueError, "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$", ): truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
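# --- illustrative sketch (not part of the original test suite) ---------------------------------
# The shapes asserted in test_truncation_and_padding follow from simple chunking arithmetic: with
# chunk_length_s=1 and sampling_rate=24000 a chunk is 24000 samples, and with overlap=0.01 the
# assumed stride between chunks is (1 - 0.01) * 24000 = 23760 samples. Padding rounds the audio
# length up to a whole number of windows, truncation rounds it down (a sketch of that arithmetic;
# the exact rounding inside EncodecFeatureExtractor may differ):
import math

CHUNK, STRIDE = 24000, 23760

def padded_length(n_samples):
    """Smallest length >= n_samples of the form CHUNK + k * STRIDE."""
    return CHUNK + math.ceil(max(n_samples - CHUNK, 0) / STRIDE) * STRIDE

def truncated_length(n_samples):
    """Largest length <= n_samples of the form CHUNK + k * STRIDE."""
    return CHUNK + max(n_samples - CHUNK, 0) // STRIDE * STRIDE

assert padded_length(93680) == 95280  # matches the padded shape asserted above
assert truncated_length(93680) == 71520  # matches the truncated shape asserted above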
transformers/tests/models/encodec/test_feature_extraction_encodec.py/0
{ "file_path": "transformers/tests/models/encodec/test_feature_extraction_encodec.py", "repo_id": "transformers", "token_count": 4914 }
374
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch FocalNet model. """ import collections import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class FocalNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.hidden_sizes = hidden_sizes self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return FocalNetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, 
hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = FocalNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_backbone(self, config, pixel_values, labels): model = FocalNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1]) # verify backbone works with out_features=None config.out_features = None model = FocalNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = FocalNetForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = FocalNetForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = FocalNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = FocalNetForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = FocalNetModelTester(self) self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="FocalNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="FocalNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # FocalNet has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape 
reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FocalNetModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_vision @require_torch class FocalNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): # TODO update organization return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device) 
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281) @require_torch class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (FocalNetBackbone,) if is_torch_available() else () config_class = FocalNetConfig has_attentions = False def setUp(self): self.model_tester = FocalNetModelTester(self)
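# --- illustrative sketch (not part of the original test suite) ---------------------------------
# The `test_initialization` pattern above relies on a quantization trick:
# ((param.data.mean() * 1e9).round() / 1e9) snaps the parameter mean to 1e-9 steps, so tensors
# that were zero-initialized (up to tiny float noise) compare exactly to 0.0 and ones-initialized
# tensors to 1.0. A self-contained illustration:
import torch

param = torch.full((37, 37), 1e-12)  # "almost zero", e.g. leftover float noise
assert ((param.mean() * 1e9).round() / 1e9).item() == 0.0  # the 1e-12 noise is rounded away

ones = torch.ones(37, 37)
assert ((ones.mean() * 1e9).round() / 1e9).item() == 1.0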
transformers/tests/models/focalnet/test_modeling_focalnet.py/0
{ "file_path": "transformers/tests/models/focalnet/test_modeling_focalnet.py", "repo_id": "transformers", "token_count": 7562 }
375
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class GLPNImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size_divisor = size_divisor self.do_rescale = do_rescale def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } def expected_output_image_shape(self, images): if isinstance(images[0], Image.Image): width, height = images[0].size else: height, width = images[0].shape[1], images[0].shape[2] height = height // self.size_divisor * self.size_divisor width = width // self.size_divisor * self.size_divisor return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, size_divisor=self.size_divisor, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class GLPNImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = GLPNImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = GLPNImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size_divisor")) self.assertTrue(hasattr(image_processing, "resample")) self.assertTrue(hasattr(image_processing, "do_rescale")) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = 
self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processing_class.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) self.image_processing_class.num_channels = 3
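# --- illustrative sketch (not part of the original test suite) ---------------------------------
# `expected_output_image_shape` above mirrors what GLPNImageProcessor does spatially: each
# dimension is floor-rounded to a multiple of `size_divisor`. A tiny standalone check of that rule:
def floor_to_multiple(value, divisor=32):
    return value // divisor * divisor

assert floor_to_multiple(400) == 384
assert floor_to_multiple(64) == 64
assert floor_to_multiple(30) == 0  # inputs smaller than the divisor would collapse to zero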
transformers/tests/models/glpn/test_image_processing_glpn.py/0
{ "file_path": "transformers/tests/models/glpn/test_image_processing_glpn.py", "repo_id": "transformers", "token_count": 2610 }
376
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch GPTNeoXJapanese model. """ import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class GPTNeoXJapaneseModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_multiple_size = intermediate_multiple_size self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.weight_tying = weight_tying self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, 
initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() config.is_decoder = True return config, input_ids, input_mask, token_labels def create_and_check_model(self, config, input_ids, input_mask): model = GPTNeoXJapaneseModel(config=config) model.to(torch_device) model.eval() _ = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder(self, config, input_ids, input_mask): config.add_cross_attention = True model = GPTNeoXJapaneseModel(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels): model = GPTNeoXJapaneseForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask): config.is_decoder = True model = GPTNeoXJapaneseForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend next_input_ids with them next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next attention mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True) output_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, token_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_model_parallel = False
test_head_masking = False def setUp(self): self.model_tester = GPTNeoXJapaneseModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, input_mask) def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_decoder_model_past_large_inputs(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) def test_model_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_generation(self): model_id = "abeja/gpt-neox-japanese-2.7b" prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] # fmt: skip EXPECTED_OUTPUTS = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id) model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id) predicted_outputs = [] for prompt in prompts: input_ids = tokenizer(prompt, return_tensors="pt").input_ids generated_ids = model.generate(input_ids, max_length=50) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
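# --- illustrative sketch (not part of the original test suite) ---------------------------------
# The decoder-past checks above all use one pattern: run the full sequence once, then run only the
# new tokens with `past_key_values`, and require both paths to agree on the overlapping positions.
# A generic sketch of that equivalence check for any transformers causal LM (the `model` argument
# is a placeholder for e.g. a GPTNeoXJapaneseForCausalLM instance):
import torch

def check_kv_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    """Return True if cached and uncached forward passes agree on the new positions."""
    with torch.no_grad():
        past = model(input_ids, use_cache=True).past_key_values
        full_logits = model(torch.cat([input_ids, next_tokens], dim=-1)).logits
        cached_logits = model(next_tokens, past_key_values=past).logits
    return torch.allclose(full_logits[:, -next_tokens.shape[1] :], cached_logits, atol=atol)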
transformers/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py/0
{ "file_path": "transformers/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py", "repo_id": "transformers", "token_count": 4859 }
377
# coding=utf-8 # Copyright 2022 Google LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import LongT5Config, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, AutoTokenizer, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, ) from transformers.models.longt5.modeling_longt5 import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST class LongT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers self.large_model_config_path = large_model_config_path def get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = 
ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return LongT5Config( vocab_size=166, # longt5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def get_config(self): return LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add causal pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output =
result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extend next_input_ids with it next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extend next_input_ids with it next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1),
dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next tokens and extend next_input_ids with them next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next attention mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [LongT5Model, LongT5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask, ) # check that the tied model has fewer parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that the tied model has fewer parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LongT5Model, LongT5ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (LongT5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": LongT5ForConditionalGeneration, "feature-extraction": LongT5Model, "summarization": LongT5ForConditionalGeneration, "text2text-generation": LongT5ForConditionalGeneration, "translation": LongT5ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_torchscript = True test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True def setUp(self): self.model_tester = LongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
            "use_cache": False,
        }
        return config, inputs_dict


@require_torch
class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LongT5Model, LongT5ForConditionalGeneration) if is_torch_available() else ()
    all_generative_model_classes = (LongT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": LongT5ForConditionalGeneration,
            "feature-extraction": LongT5Model,
            "summarization": LongT5ForConditionalGeneration,
            "text2text-generation": LongT5ForConditionalGeneration,
            "translation": LongT5ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_torchscript = True
    test_resize_embeddings = True
    test_model_parallel = False
    is_encoder_decoder = True

    def setUp(self):
        self.model_tester = LongT5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_shift_right(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_with_lm_head(*config_and_inputs)

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    def test_decoder_model_past_with_3d_attn_mask(self):
        (
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
        ) = self.model_tester.prepare_config_and_inputs()

        attention_mask = ids_tensor(
            [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length],
            vocab_size=2,
        )
        decoder_attention_mask = ids_tensor(
            [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length],
            vocab_size=2,
        )

        self.model_tester.create_and_check_decoder_model_attention_mask_past(
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
        )

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_generate_with_past_key_values(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)

    def test_encoder_decoder_shared_weights(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LongT5Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
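    # Note (added): the positional tuple handed to torch.onnx.export below follows the
    # order of LongT5Model.forward: config_and_inputs[1] is input_ids, [3] is
    # attention_mask and [2] is decoder_input_ids, even though only two input names are
    # declared. Explanatory comment only; behaviour is unchanged.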
"decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( 
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            seq_len = getattr(self.model_tester, "seq_length", None)
            decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
            encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
            decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
            encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
            chunk_length = getattr(self.model_tester, "chunk_length", None)
            block_len = getattr(self.model_tester, "block_len", None)

            if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
                encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

            for model_class in self.all_model_classes:
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, block_len, 3 * block_len],
                )
                out_len = len(outputs)

                if self.is_encoder_decoder:
                    correct_outlen = 5

                    # loss is at first position
                    if "labels" in inputs_dict:
                        correct_outlen += 1  # loss is added to beginning
                    # Question Answering model returns start_logits and end_logits
                    if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                        correct_outlen += 1  # start_logits and end_logits instead of only 1 output
                    if "past_key_values" in outputs:
                        correct_outlen += 1  # past_key_values have been returned

                    self.assertEqual(out_len, correct_outlen)

                    # decoder attentions
                    decoder_attentions = outputs.decoder_attentions
                    self.assertIsInstance(decoder_attentions, (list, tuple))
                    self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                    self.assertListEqual(
                        list(decoder_attentions[0].shape[-3:]),
                        [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                    )

                    # cross attentions
                    cross_attentions = outputs.cross_attentions
                    self.assertIsInstance(cross_attentions, (list, tuple))
                    self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                    self.assertListEqual(
                        list(cross_attentions[0].shape[-3:]),
                        [
                            self.model_tester.num_attention_heads,
                            decoder_seq_length,
                            encoder_key_length,
                        ],
                    )

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                if hasattr(self.model_tester, "num_hidden_states_types"):
                    added_hidden_states = self.model_tester.num_hidden_states_types
                elif self.is_encoder_decoder:
                    added_hidden_states = 2
                else:
                    added_hidden_states = 1
                self.assertEqual(out_len + added_hidden_states, len(outputs))

                self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, block_len, 3 * block_len],
                )

    def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
        block_len = getattr(self.model_tester, "block_len", None)
        encoder_expected_shape = (batch_size, 1, config.num_attention_heads, block_len, 3 * block_len)
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [layer_attentions.shape for layer_attentions in attentions],
            [encoder_expected_shape] * len(attentions),
        )


@require_torch
class LongT5TGlobalModelTest(LongT5ModelTest):
    def setUp(self):
        self.model_tester = LongT5ModelTester(
            self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large"
        )
        self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37)
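    # Note (added): with transient-global attention every block additionally attends to a
    # set of global tokens (one per global_block_size input positions), so the expected
    # key dimension below grows from 3 * block_len to 3 * block_len + global_seq_len.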
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            seq_len = getattr(self.model_tester, "seq_length", None)
            decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
            encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
            decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
            encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
            chunk_length = getattr(self.model_tester, "chunk_length", None)
            block_len = getattr(self.model_tester, "block_len", None)
            global_block_size = getattr(self.model_tester, "global_block_size", None)
            global_seq_len = encoder_seq_length // global_block_size

            if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
                encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

            for model_class in self.all_model_classes:
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len],
                )
                out_len = len(outputs)

                if self.is_encoder_decoder:
                    correct_outlen = 5

                    # loss is at first position
                    if "labels" in inputs_dict:
                        correct_outlen += 1  # loss is added to beginning
                    # Question Answering model returns start_logits and end_logits
                    if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                        correct_outlen += 1  # start_logits and end_logits instead of only 1 output
                    if "past_key_values" in outputs:
                        correct_outlen += 1  # past_key_values have been returned

                    self.assertEqual(out_len, correct_outlen)

                    # decoder attentions
                    decoder_attentions = outputs.decoder_attentions
                    self.assertIsInstance(decoder_attentions, (list, tuple))
                    self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                    self.assertListEqual(
                        list(decoder_attentions[0].shape[-3:]),
                        [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                    )

                    # cross attentions
                    cross_attentions = outputs.cross_attentions
                    self.assertIsInstance(cross_attentions, (list, tuple))
                    self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                    self.assertListEqual(
                        list(cross_attentions[0].shape[-3:]),
                        [
                            self.model_tester.num_attention_heads,
                            decoder_seq_length,
                            encoder_key_length,
                        ],
                    )

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                if hasattr(self.model_tester, "num_hidden_states_types"):
                    added_hidden_states = self.model_tester.num_hidden_states_types
                elif self.is_encoder_decoder:
                    added_hidden_states = 2
                else:
                    added_hidden_states = 1
                self.assertEqual(out_len + added_hidden_states, len(outputs))

                self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len],
                )

    def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
        block_len = getattr(self.model_tester, "block_len", None)
        global_block_size = getattr(self.model_tester, "global_block_size", None)
        global_seq_length = seq_length // global_block_size
        encoder_expected_shape = (
            batch_size,
            1,
            config.num_attention_heads,
            block_len,
            3 * block_len + global_seq_length,
        )
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [layer_attentions.shape for layer_attentions in attentions],
            [encoder_expected_shape] * len(attentions),
        )


class LongT5EncoderOnlyModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        local_radius=5,
        encoder_attention_type="local",
        global_block_size=3,
        # For common tests
        use_attention_mask=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        is_training=False,
        dropout_rate=0.1,
        initializer_factor=0.002,
        is_encoder_decoder=False,
        eos_token_id=1,
        pad_token_id=0,
        scope=None,
        large_model_config_path="google/long-t5-local-large",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.local_radius = local_radius
        self.block_len = local_radius + 1
        self.encoder_attention_type = encoder_attention_type
        self.global_block_size = global_block_size
        # For common tests
        self.seq_length = self.encoder_seq_length
        self.use_attention_mask = use_attention_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.is_encoder_decoder = is_encoder_decoder
        self.scope = None
        self.is_training = is_training
        self.large_model_config_path = large_model_config_path

    def get_large_model_config(self):
        return LongT5Config.from_pretrained(self.large_model_config_path)
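    # Note (added): block_len is derived as local_radius + 1 in __init__ above, mirroring
    # how the LongT5 modeling code sizes its local attention blocks; the config built
    # below feeds that geometry (local_radius, encoder_attention_type, global_block_size)
    # straight into LongT5Config. Explanatory comment only.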
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)

        config = LongT5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            is_encoder_decoder=self.is_encoder_decoder,
            local_radius=self.local_radius,
            encoder_attention_type=self.encoder_attention_type,
            global_block_size=self.global_block_size,
        )

        return (
            config,
            input_ids,
            attention_mask,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
    ):
        model = LongT5EncoderModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
        )
        result = model(input_ids=input_ids)
        encoder_output = result.last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


class LongT5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (LongT5EncoderModel,) if is_torch_available() else ()
    test_pruning = False
    test_torchscript = True
    test_resize_embeddings = False
    test_model_parallel = False

    def setUp(self):
        self.model_tester = LongT5EncoderOnlyModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
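    # Note (added): same block-sparse shape check as in the seq2seq suite above; the
    # fallback default of 4 for block_len in the getattr call below only applies when a
    # tester does not define one. Explanatory comment only.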
inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) class LongT5EncoderOnlyTGlobalModelTest(LongT5EncoderOnlyModelTest): def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", None) seq_len = getattr(self.model_tester, "seq_length", None) global_block_size = getattr(self.model_tester, "global_block_size", 4) global_seq_len = seq_len // global_block_size for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def use_task_specific_params(model, task): 
def use_task_specific_params(model, task):
    model.config.update(model.config.task_specific_params[task])


@require_torch
@require_sentencepiece
@require_tokenizers
class LongT5ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def model(self):
        return LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps").to(
            torch_device
        )

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")

    def expected_summary(self):
        return [
            "background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in"
            " developing world . it provides an excellent resolution for visualization of the coronaryarteries for"
            " catheter - based or operating interventions . although the association of this technique with major"
            " complications such as mortality is highly uncommon , it is frequently associated with various cardiac"
            " and noncardiac complications.materials and methods : in aortic stenosis , we aimed to report the"
            " diagnostic performance of 128-slice computed tomography coronary angiogram in 50 patients undergoing for"
            " major noncoron ary cardiac surgery referred"
        ]

    @slow
    def test_summarization(self):
        model = self.model
        tok = self.tokenizer

        ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 128-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with significant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically proven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoronary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis .
\n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients scheduled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed consent for undergoing msct and conventional coronary angiography . \n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n patients with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients with heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technology , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row are acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque ) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast material , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a region of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis .
\n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the diagnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the diagnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement , double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the study group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 .
\n average radiation dose in conventional coronary angiography and msct coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \n patients included in the study had low to intermediate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnostic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc . in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with invasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a pretest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary angiography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \n however , ct angiography suffers from a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in patients planned for noncoronary cardiac surgery .
\n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study with large numbers of patients is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """

        dct = tok(
            [ARTICLE],
            max_length=1024,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        ).to(torch_device)

        hypotheses_batch = model.generate(
            **dct,
            num_beams=4,
            length_penalty=2.0,
            max_length=142,
            min_length=56,
            no_repeat_ngram_size=3,
            do_sample=False,
            early_stopping=True,
        )

        decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        self.assertListEqual(
            self.expected_summary(),
            decoded,
        )

    @slow
    def test_inference_hidden_states(self):
        model = self.model

        input_ids = torch.tensor(
            [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
            dtype=torch.long,
            device=torch_device,
        )
        decoder_input_ids = torch.tensor(
            [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
            dtype=torch.long,
            device=torch_device,
        )
        attention_mask = torch.tensor(
            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
            dtype=torch.long,
            device=torch_device,
        )

        output = model(
            input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, output_hidden_states=True
        )

        # check if encoder_outputs match
        expected_output_slice = torch.tensor([0.0629, -0.1294, -0.0089, 0.0772, 0.0663], device=torch_device)
        self.assertTrue(torch.allclose(output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, atol=1e-4))

        # check if logits match
        expected_output_slice = torch.tensor([5.5231, 6.1058, 3.1766, 8.2391, -5.9453], device=torch_device)
        self.assertTrue(torch.allclose(output.logits[0, 0, :5], expected_output_slice, atol=1e-4))
transformers/tests/models/longt5/test_modeling_longt5.py/0
{ "file_path": "transformers/tests/models/longt5/test_modeling_longt5.py", "repo_id": "transformers", "token_count": 29813 }
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #            ^ unk: 2 + 1 = 3                              unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    # overwrite from test_tokenization_common to speed up test
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @unittest.skip("Need to fix this after #26538")
    def test_training_new_tokenizer(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
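    # Note (added): mBART places its language codes at the top of the fairseq-aligned
    # vocabulary, after the sentencepiece tokens plus the fairseq offset (ar_AR = 250001
    # up to ro_RO = 250020 here). The checks below pin those ids so a regression in the
    # mapping surfaces immediately. Explanatory comment only.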
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
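    # Note (added): _build_translation_inputs is the tokenizer-side helper used by the
    # translation pipeline. It encodes the source with the src_lang suffix tokens and
    # exposes the target language id as forced_bos_token_id for generate(). Explanatory
    # comment only; the test below is unchanged.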
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
transformers/tests/models/mbart/test_tokenization_mbart.py/0
{ "file_path": "transformers/tests/models/mbart/test_tokenization_mbart.py", "repo_id": "transformers", "token_count": 6595 }
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch MobileViT model. """


import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=32,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            hidden_sizes=[12, 16, 20],
            neck_hidden_sizes=[8, 8, 16, 16, 32, 32, 32],
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
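# Note (added): the hidden-state and logit shapes asserted above all derive from
# output_stride: the final feature map has spatial size image_size // output_stride. The
# common test below cross-checks this by doubling a divisor at each of the five stages and
# comparing it against output_stride at the end. Explanatory comment only.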
""" all_model_classes = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileViTModelTester(self) self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device) image_processor = self.default_image_processor image 
= prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_post_processing_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)]) expected_shape = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape, expected_shape)
transformers/tests/models/mobilevit/test_modeling_mobilevit.py
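The MobileViT integration tests above pin exact logits and shapes; for reference, here is a minimal standalone sketch of the same segmentation inference and post-processing flow they exercise (the local image path is a placeholder):

import torch
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small").eval()

image = Image.open("cats.png")  # placeholder path
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # logits come out at a reduced resolution, e.g. (1, 21, 32, 32)

# post-processing upsamples the logits to the requested target size and takes the argmax
segmentation = image_processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]  # PIL size is (width, height)
)[0]
print(segmentation.shape)  # (height, width) map of per-pixel class ids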
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import Owlv2ImageProcessor class Owlv2ImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size if size is not None else {"height": 18, "width": 18} self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class Owlv2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Owlv2ImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = Owlv2ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size={"height": 42, "width": 42} ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) @slow def test_image_processor_integration_test(self): processor = Owlv2ImageProcessor() image = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") pixel_values = processor(image, return_tensors="pt").pixel_values mean_value = round(pixel_values.mean().item(), 4) self.assertEqual(mean_value, 0.2353) @unittest.skip("OWLv2 doesn't treat 4 channel PIL and numpy consistently yet") # FIXME Amy def test_call_numpy_4_channels(self): pass
transformers/tests/models/owlv2/test_image_processor_owlv2.py
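For context, a small sketch of using the OWLv2 image processor outside the test harness, with a synthetic image standing in for the COCO fixture; the output spatial size depends on the processor's `size` config:

import numpy as np
from transformers import Owlv2ImageProcessor

processor = Owlv2ImageProcessor()  # default resize + normalize with the CLIP mean/std used above

# synthetic RGB image in place of ./tests/fixtures/tests_samples/COCO/000000039769.png
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # (1, 3, H, W); H and W come from the processor's size config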
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch PEGASUS-X model. """ import copy import math import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import PegasusTokenizer, PegasusXConfig, PegasusXForConditionalGeneration, PegasusXModel from transformers.models.pegasus_x.modeling_pegasus_x import PegasusXDecoder, PegasusXEncoder def prepare_pegasus_x_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } @require_torch class PegasusXModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = PegasusXConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, 
attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, stagger_local_blocks=False, ) inputs_dict = prepare_pegasus_x_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = PegasusXModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = PegasusXModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = PegasusXEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = PegasusXDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class PegasusXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PegasusXModel, PegasusXForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (PegasusXForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": PegasusXForConditionalGeneration, "feature-extraction": PegasusXModel, 
"summarization": PegasusXForConditionalGeneration, "text2text-generation": PegasusXForConditionalGeneration, "translation": PegasusXForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False def setUp(self): self.model_tester = PegasusXModelTester(self) self.config_tester = ConfigTester(self, config_class=PegasusXConfig) @unittest.skip( "`PegasusXGlobalLocalAttention` returns attentions as dictionary - not compatible with torchscript " ) def test_torchscript_output_attentions(self): pass def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (PegasusXModel, PegasusXForConditionalGeneration): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = PegasusXForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True 
inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0]["local"].shape[-4:]), [ self.model_tester.num_attention_heads, math.ceil(encoder_seq_length / model.config.block_size), model.config.block_size, model.config.block_size + model.config.num_global_tokens, ], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0]["local"].shape[-4:]), [ self.model_tester.num_attention_heads, math.ceil(encoder_seq_length / model.config.block_size), model.config.block_size, model.config.block_size + model.config.num_global_tokens, ], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = ( batch_size, config.num_attention_heads, math.ceil(seq_length / config.block_size), config.block_size, config.block_size + config.num_global_tokens, ) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions["local"].shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def 
_check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, self.round_up(seq_length, config.block_size), config.hidden_size) self.assertIsInstance(hidden_states, tuple) # Only the last layer will have the hidden states truncated back to token level self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states[:-1]], [encoder_expected_shape] * (len(hidden_states) - 1), ) # Only the last layer will have the hidden states truncated back to token level self.assertEqual( hidden_states[-1][0].shape, (batch_size, seq_length, config.hidden_size), ) def test_hidden_states_output(self): def _check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.round_up(seq_length, config.block_size), self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True _check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True _check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions["local"].retain_grad() encoder_attentions["global"].retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() 
output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions["local"].grad) self.assertIsNotNone(encoder_attentions["global"].grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) @classmethod def round_up(cls, n, k): return math.ceil(n / k) * k def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class PegasusXModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return PegasusTokenizer.from_pretrained("google/pegasus-x-base") def test_inference_no_head(self): model = PegasusXModel.from_pretrained("google/pegasus-x-base").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[2, 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588]]) inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[0.0702, -0.1552, 0.1192], [0.0836, -0.1848, 0.1304], [0.0673, -0.1686, 0.1045]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base").to(torch_device) # change to intended input input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, model.config.vocab_size)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[0.0, 9.5705185, 1.5897303], [0.0, 9.833374, 1.5828674], [0.0, 10.429961, 1.5643371]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): hf = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base-arxiv").to(torch_device) tok = PegasusTokenizer.from_pretrained("google/pegasus-x-base") batch_input = [ "While large pretrained Transformer models have 
proven highly capable at tackling natural language tasks," " handling long sequence inputs continues to be a significant challenge. One such task is long input" " summarization, where inputs are longer than the maximum input context of most pretrained models. Through" " an extensive set of experiments, we investigate what model architectural changes and pretraining" " paradigms can most efficiently adapt a pretrained Transformer for long input summarization. We find that" " a staggered, block-local Transformer with global encoder tokens strikes a good balance of performance" " and efficiency, and that an additional pretraining phase on long sequences meaningfully improves" " downstream summarization performance. Based on our findings, we introduce PEGASUS-X, an extension of the" " PEGASUS model with additional long input pretraining to handle inputs of up to 16K tokens. PEGASUS-X" " achieves strong performance on long input summarization tasks comparable with much larger models while" " adding few additional parameters and not requiring model parallelism to train." ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tok.batch_encode_plus( batch_input, max_length=512, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="pt", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=2, max_length=32, ) EXPECTED = [ "we investigate the performance of a new pretrained model for long input summarization. <n> the model is a" " superposition of two well -" ] generated = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == EXPECTED class PegasusXStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if 
self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = PegasusXConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = PegasusXDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = PegasusXDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test 
that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class PegasusXStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (PegasusXDecoder,) if is_torch_available() else () all_generative_model_classes = () test_pruning = False is_encoder_decoder = False test_head_masking = False def setUp( self, ): self.model_tester = PegasusXStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=PegasusXConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return
transformers/tests/models/pegasus_x/test_modeling_pegasus_x.py
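The `test_seq_to_seq_generation` integration test above, condensed into a standalone summarization snippet using the same checkpoints (the input document is shortened to a placeholder):

import torch
from transformers import PegasusTokenizer, PegasusXForConditionalGeneration

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-x-base")
model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base-arxiv").eval()

document = "While large pretrained Transformer models have proven highly capable ..."  # long input
inputs = tokenizer(document, max_length=512, truncation=True, padding="max_length", return_tensors="pt")
with torch.no_grad():
    summary_ids = model.generate(**inputs, num_beams=2, max_length=32)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])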
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right EN_CODE = 50003 PYTHON_CODE = 50002 @require_sentencepiece @require_tokenizers class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = PLBartTokenizer rust_tokenizer_class = None test_rust_tokenizer = False def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_base_tokenizer(self): tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) end = tokenizer.vocab_size language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)] self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"]) code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" input_ids = tokenizer(code).input_ids self.assertEqual( tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, ) def test_full_multi_tokenizer(self): tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", 
"est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) end = tokenizer.vocab_size language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)] self.assertListEqual( language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" input_ids = tokenizer(code).input_ids self.assertEqual( tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, ) @require_torch @require_sentencepiece @require_tokenizers class PLBartPythonEnIntegrationTest(unittest.TestCase): checkpoint_name = "uclanlp/plbart-python-en_XX" src_text = [ "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])", ] tgt_text = [ "Returns the maximum value of a b c.", "Sums the values of a b c.", ] expected_src_tokens = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] @classmethod def setUpClass(cls): cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX" ) cls.pad_token_id = 1 return cls def check_language_codes(self): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003) def test_python_en_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_python_en_tokenizer_decode_ignores_language_codes(self): self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids) generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_english) self.assertNotIn(self.tokenizer.eos_token, result) def test_python_en_tokenizer_truncation(self): src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] 
self.assertEqual(ids[-2], 2) self.assertEqual(ids[-1], PYTHON_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001]) def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(tmpdirname) new_tok = PLBartTokenizer.from_pretrained(tmpdirname) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens) @require_torch def test_batch_fairseq_parity(self): batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt") batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE]) self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE) self.assertEqual(batch.decoder_input_ids[1][-1], 2) self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE]) @require_torch def test_python_en_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 26), batch.input_ids.shape) self.assertEqual((2, 26), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, []) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 50003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50001, }, )
transformers/tests/models/plbart/test_tokenization_plbart.py
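A sketch of the code-to-text flow the PLBart integration tests exercise, following the documented PLBart API; the generation kwargs are illustrative:

import torch
from transformers import PLBartForConditionalGeneration, PLBartTokenizer

tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-python-en_XX").eval()

code = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
inputs = tokenizer(code, return_tensors="pt")  # appends EOS and the __python__ language code
with torch.no_grad():
    generated = model.generate(
        **inputs,
        decoder_start_token_id=tokenizer.lang_code_to_id["en_XX"],  # force the English target
        max_length=32,
    )
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])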
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # Copyright 2021 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch QDQBERT model. """ import unittest from transformers import QDQBertConfig, is_torch_available from transformers.testing_utils import require_pytorch_quantization, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, QDQBertLMHeadModel, QDQBertModel, ) from transformers.models.qdqbert.modeling_qdqbert import QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST class QDQBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): # Set default quantizers before creating the model. 
import pytorch_quantization.nn as quant_nn from pytorch_quantization.tensor_quant import QuantDescriptor # The default tensor quantizer is set to use Max calibration method input_desc = QuantDescriptor(num_bits=8, calib_method="max") # The default tensor quantizer is set to be per-channel quantization for weights weight_desc = QuantDescriptor(num_bits=8, axis=((0,))) quant_nn.QuantLinear.set_default_quant_desc_input(input_desc) quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc) # For the test cases, since QDQBert model is tested in one run without calibration, the quantized tensors are set as fake quantized tensors which give float type tensors in the end. quant_nn.TensorQuantizer.use_fb_fake_quant = True input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return QDQBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = QDQBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = QDQBertModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, 
token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = QDQBertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = QDQBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_model_for_causal_lm_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = QDQBertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = QDQBertLMHeadModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, 
random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = QDQBertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = QDQBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = QDQBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = QDQBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = QDQBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch @require_pytorch_quantization class QDQBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( QDQBertModel, QDQBertForMaskedLM, QDQBertForMultipleChoice, 
QDQBertForNextSentencePrediction, QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, QDQBertLMHeadModel, ) if is_torch_available() else () ) all_generative_model_classes = (QDQBertLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": QDQBertModel, "fill-mask": QDQBertForMaskedLM, "question-answering": QDQBertForQuestionAnswering, "text-classification": QDQBertForSequenceClassification, "text-generation": QDQBertLMHeadModel, "token-classification": QDQBertForTokenClassification, "zero-shot": QDQBertForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = QDQBertModelTester(self) self.config_tester = ConfigTester(self, config_class=QDQBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = QDQBertModel.from_pretrained(model_name) self.assertIsNotNone(model) # Override def test_feed_forward_chunking(self): # feed forward chunking is not supported in QDQBert pass @require_torch @require_pytorch_quantization class QDQBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): # Set default quantizers before creating the model. import pytorch_quantization.nn as quant_nn from pytorch_quantization.tensor_quant import QuantDescriptor # The default tensor quantizer is set to use Max calibration method input_desc = QuantDescriptor(num_bits=8, calib_method="max") # The default tensor quantizer is set to be per-channel quantization for weights weight_desc = QuantDescriptor(num_bits=8, axis=((0,))) quant_nn.QuantLinear.set_default_quant_desc_input(input_desc) quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc) model = QDQBertModel.from_pretrained("bert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.4571, -0.0735, 0.8594], [0.2774, -0.0278, 0.8794], [0.3548, -0.0473, 0.7593]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
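# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the integration test
# above only works because the pytorch_quantization defaults are configured
# *before* the model is instantiated. A minimal standalone version of that
# setup, assuming pytorch_quantization is installed and the "bert-base-uncased"
# weights are reachable:
import torch
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

from transformers import QDQBertModel

# per-tensor Max calibration for activations, per-channel (axis 0) quantization for weights
quant_nn.QuantLinear.set_default_quant_desc_input(QuantDescriptor(num_bits=8, calib_method="max"))
quant_nn.QuantLinear.set_default_quant_desc_weight(QuantDescriptor(num_bits=8, axis=(0,)))

model = QDQBertModel.from_pretrained("bert-base-uncased").eval()
input_ids = torch.tensor([[101, 7592, 2088, 102]])  # "[CLS] hello world [SEP]" under the bert-base-uncased vocab
with torch.no_grad():
    last_hidden_state = model(input_ids).last_hidden_state
print(last_hidden_state.shape)  # torch.Size([1, 4, 768])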
transformers/tests/models/qdqbert/test_modeling_qdqbert.py/0
{ "file_path": "transformers/tests/models/qdqbert/test_modeling_qdqbert.py", "repo_id": "transformers", "token_count": 10437 }
384
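# ---------------------------------------------------------------------------
# Editor's sketch: the cached-decoding equivalence that
# create_and_check_decoder_model_past_large_inputs verifies in the file above,
# reduced to its essence. Sizes are hypothetical; this assumes a transformers
# version that still ships QDQBert and that pytorch_quantization is installed
# (the QDQBert modules require it even for an uncalibrated forward pass).
import torch
from transformers import QDQBertConfig, QDQBertLMHeadModel

config = QDQBertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, is_decoder=True,
)
model = QDQBertLMHeadModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 5))
past_key_values = model(input_ids, use_cache=True).past_key_values

# Feed one new token with the cache, and the full sequence without it. Like the
# test, we compare hidden_states[0] (the embedding output): with a correct
# cache, the new position gets the same position offset on both paths.
next_token = torch.randint(0, config.vocab_size, (1, 1))
full = model(torch.cat([input_ids, next_token], dim=-1), output_hidden_states=True)
cached = model(next_token, past_key_values=past_key_values, output_hidden_states=True)
assert torch.allclose(full.hidden_states[0][:, -1], cached.hidden_states[0][:, -1], atol=1e-3)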
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SAM model. """ import gc import unittest import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig, pipeline from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SamModel, SamProcessor from transformers.models.sam.modeling_sam import SAM_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class SamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps def get_config(self): return SamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class SamModelTester: def __init__( self, parent, 
hidden_size=36, intermediate_size=72, projection_dim=62, output_channels=32, num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = SamPromptEncoderTester() self.mask_decoder_tester = SamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.get_image_embeddings(pixel_values) self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12)) def 
create_and_check_get_image_hidden_states(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=False, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SamModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": SamModel, "mask-generation": SamModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = SamModelTester(self) self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False) self.prompt_encoder_config_tester = ConfigTester( self, config_class=SamPromptEncoderConfig, has_text_modality=False, num_attention_heads=12, num_hidden_layers=2, ) self.mask_decoder_config_tester = ConfigTester( self, config_class=SamMaskDecoderConfig, has_text_modality=False ) def test_config(self): self.vision_config_tester.run_common_tests() self.prompt_encoder_config_tester.run_common_tests() self.mask_decoder_config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() 
config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="SamModel does not support training") def test_training(self): pass @unittest.skip(reason="SamModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SamModel does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # Use a slightly higher default tol to make the tests non-flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) @slow def test_model_from_pretrained(self): for model_name in SAM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SamModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url =
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @slow class SamModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) def test_inference_mask_generation_no_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.4515), atol=2e-4)) self.assertTrue(torch.allclose(masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), atol=2e-4)) def test_inference_mask_generation_one_point_one_bb(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9566), atol=2e-4)) self.assertTrue( torch.allclose(masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), atol=2e-4) ) def test_inference_mask_generation_batched_points_batched_images(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze().cpu() masks = outputs.pred_masks[0, 0, 0, 0, :3].cpu() EXPECTED_SCORES = torch.tensor( [ [ [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], [ [0.3317, 0.7264, 0.7646], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], ] ) EXPECTED_MASKS = torch.tensor([-2.8550, -2.7988, -2.9625]) self.assertTrue(torch.allclose(scores, EXPECTED_SCORES, atol=1e-3)) self.assertTrue(torch.allclose(masks, EXPECTED_MASKS, atol=1e-3)) def test_inference_mask_generation_one_point_one_bb_zero(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], 
torch.tensor(0.7894), atol=1e-4)) def test_inference_mask_generation_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4)) # With no label input_points = [[[400, 650]]] inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4)) def test_inference_mask_generation_two_points(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4)) # no labels inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4)) def test_inference_mask_generation_two_points_batched(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]], [[400, 650]]] input_labels = [[1, 1], [1]] inputs = processor( images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[0][-1], torch.tensor(0.9762), atol=1e-4)) self.assertTrue(torch.allclose(scores[1][-1], torch.tensor(0.9637), atol=1e-4)) def test_inference_mask_generation_one_box(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[75, 275, 1725, 850]]] inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7937), atol=1e-4)) def test_inference_mask_generation_batched_image_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() raw_dog_image = prepare_dog_img() input_points = [[[820, 1080]], [[220, 470]]] inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): 
outputs = model(**inputs) scores_batched = outputs.iou_scores.squeeze() input_points = [[[220, 470]]] inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores_single = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores_batched[1, :], scores_single, atol=1e-4)) def test_inference_mask_generation_two_points_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() # fmt: skip input_points = input_points.unsqueeze(0) inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 2, 3)) torch.testing.assert_allclose( iou_scores, torch.tensor([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4 ) def test_inference_mask_generation_three_boxes_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() # fmt: off input_boxes = torch.Tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]).cpu() EXPECTED_IOU = torch.tensor([[[0.9773, 0.9881, 0.9522], [0.5996, 0.7661, 0.7937], [0.5996, 0.7661, 0.7937]]]) # fmt: on input_boxes = input_boxes.unsqueeze(0) inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 3, 3)) torch.testing.assert_allclose(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4) def test_dummy_pipeline_generation(self): generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=torch_device) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64)
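# ---------------------------------------------------------------------------
# Editor's sketch: a basic point-prompted SAM call, distilled from the
# integration tests above. Assumes network access to the Hub and to the sample
# image used by prepare_image().
import requests
import torch
from PIL import Image

from transformers import SamModel, SamProcessor

model = SamModel.from_pretrained("facebook/sam-vit-base").eval()
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

# one foreground point (x, y) with label 1, nested as [image][prompt][point]
inputs = processor(images=raw_image, input_points=[[[400, 650]]], input_labels=[[1]], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.iou_scores.shape)  # (1, 1, 3): three candidate masks per prompt, each with a predicted IoU
print(outputs.pred_masks.shape)  # low-resolution mask logits for the three candidates
# For a prompt-free flow, the "mask-generation" pipeline used in
# test_dummy_pipeline_generation above wraps the same model behind a one-liner.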
transformers/tests/models/sam/test_modeling_sam.py/0
{ "file_path": "transformers/tests/models/sam/test_modeling_sam.py", "repo_id": "transformers", "token_count": 13033 }
385
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np import pandas as pd from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TapasConfig, is_torch_available, ) from transformers.models.auto import get_values from transformers.testing_utils import require_tensorflow_probability, require_torch, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, ) from transformers.models.tapas.modeling_tapas import ( IndexMap, ProductIndexMap, flatten, gather, range_index_map, reduce_max, reduce_mean, reduce_sum, ) from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12 else: is_torch_greater_or_equal_than_1_12 = False class TapasModelTester: """You can also import this e.g from .test_modeling_tapas import TapasModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=512, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], type_sequence_label_size=2, positive_weight=10.0, num_aggregation_labels=4, num_labels=2, aggregation_loss_importance=0.8, use_answer_as_supervision=True, answer_loss_importance=0.001, use_normalized_answer_loss=False, huber_loss_delta=25.0, temperature=1.0, agg_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_agg=False, average_approximation_function="ratio", cell_selection_preference=0.5, answer_loss_cutoff=100, max_num_rows=64, max_num_columns=32, average_logits_per_cell=True, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=True, reset_position_index_per_cell=True, disable_per_token_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act 
self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.max_position_embeddings = max_position_embeddings self.type_vocab_sizes = type_vocab_sizes self.type_sequence_label_size = type_sequence_label_size self.positive_weight = positive_weight self.num_aggregation_labels = num_aggregation_labels self.num_labels = num_labels self.aggregation_loss_importance = aggregation_loss_importance self.use_answer_as_supervision = use_answer_as_supervision self.answer_loss_importance = answer_loss_importance self.use_normalized_answer_loss = use_normalized_answer_loss self.huber_loss_delta = huber_loss_delta self.temperature = temperature self.agg_temperature = agg_temperature self.use_gumbel_for_cells = use_gumbel_for_cells self.use_gumbel_for_agg = use_gumbel_for_agg self.average_approximation_function = average_approximation_function self.cell_selection_preference = cell_selection_preference self.answer_loss_cutoff = answer_loss_cutoff self.max_num_rows = max_num_rows self.max_num_columns = max_num_columns self.average_logits_per_cell = average_logits_per_cell self.select_one_column = select_one_column self.allow_empty_column_selection = allow_empty_column_selection self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero self.reset_position_index_per_cell = reset_position_index_per_cell self.disable_per_token_loss = disable_per_token_loss self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).to(torch_device) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]).to(torch_device) token_type_ids = [] for type_vocab_size in self.type_vocab_sizes: token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size)) token_type_ids = torch.stack(token_type_ids, dim=2).to(torch_device) sequence_labels = None token_labels = None labels = None numeric_values = None numeric_values_scale = None float_answer = None aggregation_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size).to(torch_device) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels).to(torch_device) labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2).to(torch_device) numeric_values = floats_tensor([self.batch_size, self.seq_length]).to(torch_device) numeric_values_scale = floats_tensor([self.batch_size, self.seq_length]).to(torch_device) float_answer = floats_tensor([self.batch_size]).to(torch_device) aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels).to(torch_device) config = self.get_config() return ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) def get_config(self): return TapasConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_sizes=self.type_vocab_sizes, initializer_range=self.initializer_range, positive_weight=self.positive_weight, 
num_aggregation_labels=self.num_aggregation_labels, num_labels=self.num_labels, aggregation_loss_importance=self.aggregation_loss_importance, use_answer_as_supervision=self.use_answer_as_supervision, answer_loss_importance=self.answer_loss_importance, use_normalized_answer_loss=self.use_normalized_answer_loss, huber_loss_delta=self.huber_loss_delta, temperature=self.temperature, agg_temperature=self.agg_temperature, use_gumbel_for_cells=self.use_gumbel_for_cells, use_gumbel_for_agg=self.use_gumbel_for_agg, average_approximation_function=self.average_approximation_function, cell_selection_preference=self.cell_selection_preference, answer_loss_cutoff=self.answer_loss_cutoff, max_num_rows=self.max_num_rows, max_num_columns=self.max_num_columns, average_logits_per_cell=self.average_logits_per_cell, select_one_column=self.select_one_column, allow_empty_column_selection=self.allow_empty_column_selection, init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero, reset_position_index_per_cell=self.reset_position_index_per_cell, disable_per_token_loss=self.disable_per_token_loss, ) def create_and_check_model( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TapasModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TapasForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): # inference: without aggregation head (SQA). Model only returns logits sqa_config = copy.copy(config) sqa_config.num_aggregation_labels = 0 sqa_config.use_answer_as_supervision = False model = TapasForQuestionAnswering(config=sqa_config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # inference: with aggregation head (WTQ, WikiSQL-supervised). 
Model returns logits and aggregation logits model = TapasForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # training: can happen in 3 main ways # case 1: conversational (SQA) model = TapasForQuestionAnswering(config=sqa_config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=labels, ) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # case 2: weak supervision for aggregation (WTQ) model = TapasForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, float_answer=float_answer, ) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # case 3: strong supervision for aggregation (WikiSQL-supervised) wikisql_config = copy.copy(config) wikisql_config.use_answer_as_supervision = False model = TapasForQuestionAnswering(config=wikisql_config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=labels, aggregation_labels=aggregation_labels, ) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): config.num_labels = self.num_labels model = TapasForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TapasModel, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": TapasModel, "fill-mask": TapasForMaskedLM, "table-question-answering": TapasForQuestionAnswering, "text-classification": TapasForSequenceClassification, "zero-shot": TapasForSequenceClassification, } if is_torch_available() else {} ) 
test_pruning = False test_resize_embeddings = True test_head_masking = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class in get_values(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["aggregation_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["numeric_values"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.float, device=torch_device, ) inputs_dict["numeric_values_scale"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.float, device=torch_device, ) inputs_dict["float_answer"] = torch.zeros( self.model_tester.batch_size, dtype=torch.float, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(MODEL_FOR_MASKED_LM_MAPPING), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = TapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) @require_tensorflow_probability def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence() def prepare_tapas_single_inputs_for_inference(): # Here we prepare a single table-question pair to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], } queries = "Which footballer is 33 years old?" 
table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_inference(): # Here we prepare a batch of 2 table-question pairs to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"] table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_training(): # Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "What's the total number of goals?"] table = pd.DataFrame.from_dict(data) answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]] answer_text = [["Lionel Messi"], ["1462"]] float_answer = [float("NaN"), float("1462")] return table, queries, answer_coordinates, answer_text, float_answer @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch class TapasModelIntegrationTest(unittest.TestCase): @cached_property def default_tokenizer(self): return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") @slow def test_inference_no_head(self): # ideally we want to test this with the weights of tapas_inter_masklm_base_reset, # but since it's not straightforward to do this with the TF 1 implementation, we test it with # the weights of the WTQ base model (i.e. tapas_wtq_wikisql_sqa_inter_masklm_base_reset) model = TapasModel.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) # test the sequence output expected_slice = torch.tensor( [ [ [-0.141581565, -0.599805772, 0.747186482], [-0.143664181, -0.602008104, 0.749218345], [-0.15169853, -0.603363097, 0.741370678], ] ], device=torch_device, ) self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005)) # test the pooled output expected_slice = torch.tensor([[0.987518311, -0.970520139, -0.994303405]], device=torch_device) self.assertTrue(torch.allclose(outputs.pooler_output[:, :3], expected_slice, atol=0.0005)) @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass # TapasForQuestionAnswering has 3 possible ways of being fine-tuned: # - conversational set-up (SQA) # - weak supervision for aggregation (WTQ, WikiSQL) # - strong supervision for aggregation (WikiSQL-supervised) # We test all of them: @slow def test_inference_question_answering_head_conversational(self): # note that google/tapas-base-finetuned-sqa should correspond to tapas_sqa_inter_masklm_base_reset model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = torch.Size((1, 21)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( 
[ [ -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -16.2628059, -10004.082, 15.4330549, 15.4330549, 15.4330549, -9990.42, -16.3270779, -16.3270779, -16.3270779, -16.3270779, -16.3270779, -10004.8506, ] ], device=torch_device, ) self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.015)) @slow def test_inference_question_answering_head_conversational_absolute_embeddings(self): # note that google/tapas-small-finetuned-sqa should correspond to tapas_sqa_inter_masklm_small_reset # however here we test the version with absolute position embeddings model = TapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa", revision="no_reset").to( torch_device ) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = torch.Size((1, 21)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [ [ -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -18.8419304, -10018.0391, 17.7848816, 17.7848816, 17.7848816, -9981.02832, -16.4005489, -16.4005489, -16.4005489, -16.4005489, -16.4005489, -10013.4736, ] ], device=torch_device, ) self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.01)) @slow def test_inference_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device) tokenizer = self.default_tokenizer # let's test on a batch table, queries = prepare_tapas_batch_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt") inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs_on_device) # test the logits logits = outputs.logits expected_shape = torch.Size((2, 28)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736], [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[:, -6:], expected_slice, atol=0.4)) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = torch.Size((2, 4)) self.assertEqual(logits_aggregation.shape, expected_shape) expected_tensor = torch.tensor( [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]], device=torch_device, ) self.assertTrue(torch.allclose(logits_aggregation, expected_tensor, atol=0.001)) # test the predicted answer coordinates and aggregation indices EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]] EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1] predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits.detach().cpu(), outputs.logits_aggregation.detach().cpu() ) self.assertEqual(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates) self.assertEqual(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices) @slow def 
test_training_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device) model.to(torch_device) # normally we should put the model in training mode but it's a pain to do this with the TF 1 implementation tokenizer = self.default_tokenizer # let's test on a batch table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training() inputs = tokenizer( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding="longest", return_tensors="pt", ) # prepare data (created by the tokenizer) and move to torch_device input_ids = inputs["input_ids"].to(torch_device) attention_mask = inputs["attention_mask"].to(torch_device) token_type_ids = inputs["token_type_ids"].to(torch_device) labels = inputs["labels"].to(torch_device) numeric_values = inputs["numeric_values"].to(torch_device) numeric_values_scale = inputs["numeric_values_scale"].to(torch_device) # the answer should be prepared by the user float_answer = torch.FloatTensor(float_answer).to(torch_device) # forward pass to get loss + logits: with torch.no_grad(): outputs = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, float_answer=float_answer, ) # test the loss loss = outputs.loss expected_loss = torch.tensor(3.3527612686157227e-08, device=torch_device) self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-6)) # test the logits on the first example logits = outputs.logits expected_shape = torch.Size((2, 29)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ -160.0156, -160.0156, -160.0156, -160.0156, -160.0156, -10072.2266, -10070.8896, -10092.6006, -10092.6006, ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, -9:], expected_slice, atol=1e-6)) # test the aggregation logits on the second example logits_aggregation = outputs.logits_aggregation expected_shape = torch.Size((2, 4)) self.assertEqual(logits_aggregation.shape, expected_shape) expected_slice = torch.tensor([-4.0538, 40.0304, -5.3554, 23.3965], device=torch_device) self.assertTrue(torch.allclose(logits_aggregation[1, -4:], expected_slice, atol=1e-4)) @slow def test_inference_question_answering_head_strong_supervision(self): # note that google/tapas-base-finetuned-wikisql-supervised should correspond to tapas_wikisql_sqa_inter_masklm_base_reset model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised").to( torch_device ) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = torch.Size((1, 21)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [ [ -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -18.6185989, -10008.7969, 17.6355762, 17.6355762, 17.6355762, -10002.4404, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -10007.0977, ] ], device=torch_device, ) self.assertTrue(torch.allclose(logits, 
expected_tensor, atol=0.02)) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = torch.Size((1, 4)) self.assertEqual(logits_aggregation.shape, expected_shape) expected_tensor = torch.tensor( [[16.5659733, -3.06624889, -2.34152961, -0.970244825]], device=torch_device ) # PyTorch model outputs [[16.5679, -3.0668, -2.3442, -0.9674]] self.assertTrue(torch.allclose(logits_aggregation, expected_tensor, atol=0.003)) @slow def test_inference_classification_head(self): # note that google/tapas-base-finetuned-tabfact should correspond to tapas_tabfact_inter_masklm_base_reset model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) # test the classification logits logits = outputs.logits expected_shape = torch.Size((1, 2)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [[0.795137286, 9.5572]], device=torch_device ) # Note that the PyTorch model outputs [[0.8057, 9.5281]] self.assertTrue(torch.allclose(outputs.logits, expected_tensor, atol=0.05)) # Below: tests for Tapas utilities which are defined in modeling_tapas.py. # These are based on segmented_tensor_test.py of the original implementation. # URL: https://github.com/google-research/tapas/blob/master/tapas/models/segmented_tensor_test.py @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch class TapasUtilitiesTest(unittest.TestCase): def _prepare_tables(self): """Prepares two tables, both with three distinct rows. The first table has two columns: 1.0, 2.0 | 3.0 2.0, 0.0 | 1.0 1.0, 3.0 | 4.0 The second table has three columns: 1.0 | 2.0 | 3.0 2.0 | 0.0 | 1.0 1.0 | 3.0 | 4.0 Returns: SegmentedTensors with the tables. """ values = torch.tensor( [ [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], ] ) row_index = IndexMap( indices=torch.tensor( [ [[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]], ] ), num_segments=3, batch_dims=1, ) col_index = IndexMap( indices=torch.tensor( [ [[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]], ] ), num_segments=3, batch_dims=1, ) return values, row_index, col_index def test_product_index(self): _, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_index_proj = cell_index.project_outer(cell_index) col_index_proj = cell_index.project_inner(cell_index) ind = cell_index.indices self.assertEqual(cell_index.num_segments, 9) # Projections should give back the original indices. # we use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy()) self.assertEqual(row_index.num_segments, row_index_proj.num_segments) self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy()) self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims) # The first and second "column" are identified in the first table. 
for i in range(3):
            self.assertEqual(ind[0, i, 0], ind[0, i, 1])
            self.assertNotEqual(ind[0, i, 0], ind[0, i, 2])

        # All rows are distinct in the first table.
        for i in range(3):
            for i_2 in range(3):
                for j in range(3):
                    for j_2 in range(3):
                        if i != i_2 and j != j_2:
                            self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2])

        # All cells are distinct in the second table.
        for i in range(3):
            for i_2 in range(3):
                for j in range(3):
                    for j_2 in range(3):
                        if i != i_2 or j != j_2:
                            self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2])

    def test_flatten(self):
        _, row_index, col_index = self._prepare_tables()
        row_index_flat = flatten(row_index)
        col_index_flat = flatten(col_index)

        shape = [3, 4, 5]
        batched_index = IndexMap(indices=torch.zeros(shape).type(torch.LongTensor), num_segments=1, batch_dims=3)
        batched_index_flat = flatten(batched_index)

        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(
            row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]
        )
        np.testing.assert_array_equal(
            col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5]
        )
        self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape))
        np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape)))

    def test_range_index_map(self):
        batch_shape = [3, 4]
        num_segments = 5
        index = range_index_map(batch_shape, num_segments)

        self.assertEqual(num_segments, index.num_segments)
        self.assertEqual(2, index.batch_dims)
        indices = index.indices
        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(list(indices.size()), [3, 4, 5])
        for i in range(batch_shape[0]):
            for j in range(batch_shape[1]):
                # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
                np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments))

    def test_reduce_sum(self):
        values, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)
        row_sum, _ = reduce_sum(values, row_index)
        col_sum, _ = reduce_sum(values, col_index)
        cell_sum, _ = reduce_sum(values, cell_index)

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]])
        np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]])
        np.testing.assert_allclose(
            cell_sum.numpy(),
            [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]],
        )

    def test_reduce_mean(self):
        values, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)
        row_mean, _ = reduce_mean(values, row_index)
        col_mean, _ = reduce_mean(values, col_index)
        cell_mean, _ = reduce_mean(values, cell_index)

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(
            row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]]
        )
        np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]])
        np.testing.assert_allclose(
            cell_mean.numpy(),
            [
                [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0],
                [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0],
            ],
        )

    def test_reduce_max(self):
        values = torch.as_tensor([2.0, 1.0, 0.0, 3.0])
        index = IndexMap(indices=torch.as_tensor([0, 1, 0, 1]), num_segments=2)
        maximum, _ = reduce_max(values, index)

        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
np.testing.assert_array_equal(maximum.numpy(), [2, 3])

    def test_reduce_sum_vectorized(self):
        values = torch.as_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]])
        index = IndexMap(indices=torch.as_tensor([[0, 0, 1]]), num_segments=2, batch_dims=0)
        sums, new_index = reduce_sum(values, index)

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(sums.numpy(), [3.0, 3.0])
        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1])
        np.testing.assert_array_equal(new_index.num_segments.numpy(), 2)
        np.testing.assert_array_equal(new_index.batch_dims, 0)

    def test_gather(self):
        values, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)

        # Compute sums and then gather. The result should have the same shape as
        # the original table and each element should contain the sum of the values
        # in its cell.
        sums, _ = reduce_sum(values, cell_index)
        cell_sum = gather(sums, cell_index)
        assert cell_sum.size() == values.size()

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(
            cell_sum.numpy(),
            [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]],
        )

    def test_gather_vectorized(self):
        values = torch.as_tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        index = IndexMap(indices=torch.as_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1)
        result = gather(values, index)

        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
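

# Illustrative sketch (an editor addition, not part of the original test file; it
# reuses the module-level names exercised by the tests above). The combined segment
# id produced by ProductIndexMap is outer_index * inner.num_segments + inner_index,
# which is why the 3-row by 3-column tables above yield 9 cell segments and why
# reduce_sum / gather round-trip back to the original table shape.
def _demo_product_index_arithmetic():
    values = torch.tensor([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]])  # (batch, rows, cols)
    row_index = IndexMap(
        indices=torch.tensor([[[0, 0, 0], [1, 1, 1], [2, 2, 2]]]), num_segments=3, batch_dims=1
    )
    col_index = IndexMap(
        indices=torch.tensor([[[0, 0, 1], [0, 0, 1], [0, 0, 1]]]), num_segments=3, batch_dims=1
    )
    cell_index = ProductIndexMap(row_index, col_index)

    # Combined ids follow row * 3 + col, e.g. (row=1, col=0) maps to segment 3.
    assert torch.equal(cell_index.indices, row_index.indices * 3 + col_index.indices)

    # reduce_sum pools values per cell; gather scatters each pooled value back to
    # every position of its cell, restoring the original table shape.
    sums, _ = reduce_sum(values, cell_index)
    assert gather(sums, cell_index).size() == values.size()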
transformers/tests/models/tapas/test_modeling_tapas.py
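# Editor's sketch (not part of any original file): `flatten`, exercised in
# TapasUtilitiesTest.test_flatten above, merges a batched IndexMap into one global
# index space by offsetting each batch element with batch_position * num_segments.
# That offset is the pattern behind the expected arrays such as
# [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, ...]. The import path follows the comment
# above ("defined in modeling_tapas.py").

import torch

from transformers.models.tapas.modeling_tapas import IndexMap, flatten


def _demo_flatten_offsets():
    # Two batch elements, each with segment ids in [0, 3).
    index = IndexMap(indices=torch.tensor([[0, 0, 1], [0, 0, 1]]), num_segments=3, batch_dims=1)
    flat = flatten(index)

    # Element 0 keeps ids 0..2; element 1 is shifted by 1 * 3 = 3.
    assert flat.indices.tolist() == [0, 0, 1, 3, 3, 4]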
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VideoMAE model. """ import copy import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class VideoMAEModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.tubelet_size = tubelet_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos self.num_masks = int(mask_ratio * self.seq_length) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VideoMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, 
tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, decoder_hidden_size=self.hidden_size, decoder_intermediate_size=self.intermediate_size, decoder_num_attention_heads=self.num_attention_heads, decoder_num_hidden_layers=self.num_hidden_layers, ) def create_and_check_model(self, config, pixel_values, labels): model = VideoMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = VideoMAEForPreTraining(config) model.to(torch_device) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch mask = torch.ones((self.num_masks,)) mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.batch_size, -1).bool() result = model(pixel_values, bool_masked_pos) # model only returns predictions for masked patches num_masked_patches = mask.sum().item() decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VideoMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch mask = torch.ones((self.model_tester.num_masks,)) mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool() inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device) if return_labels: if model_class in [ *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VideoMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_len = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( 
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VideoMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to( torch_device ) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_for_pretraining(self): model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) # add boolean mask, indicating which patches to mask local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", 
filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
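

# Editor's sketch (not part of the original test file): how the boolean mask fed to
# VideoMAEForPreTraining is sized. The token count is
# (num_frames // tubelet_size) * (image_size // patch_size) ** 2, exactly as in the
# model tester above, and the decoder predicts 3 * tubelet_size * patch_size ** 2
# pixel values per masked token, the 1536 seen in test_inference_for_pretraining.

import torch


def _demo_videomae_mask_shapes():
    image_size, patch_size, num_frames, tubelet_size = 224, 16, 16, 2
    num_patches_per_frame = (image_size // patch_size) ** 2            # 14 * 14 = 196
    seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 8 * 196 = 1568

    # One shared mask, repeated per example so every video masks the same count.
    num_masks = int(0.9 * seq_length)  # mask_ratio = 0.9
    mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
    bool_masked_pos = mask.expand(2, -1).bool()  # batch_size = 2
    assert bool_masked_pos.shape == (2, seq_length)

    # Reconstruction target width per masked token: 3 channels * tubelet * patch^2.
    assert 3 * tubelet_size * patch_size**2 == 1536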
transformers/tests/models/videomae/test_modeling_videomae.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VisualBERT model. """ import copy import unittest from transformers import VisualBertConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, VisualBertModel, ) from transformers.models.visual_bert.modeling_visual_bert import VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST class VisualBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, visual_seq_length=5, is_training=True, use_attention_mask=True, use_visual_attention_mask=True, use_token_type_ids=True, use_visual_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, visual_embedding_dim=20, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.visual_seq_length = visual_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_visual_attention_mask = use_visual_attention_mask self.use_token_type_ids = use_token_type_ids self.use_visual_token_type_ids = use_visual_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.visual_embedding_dim = visual_embedding_dim self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_config(self): return VisualBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, visual_embedding_dim=self.visual_embedding_dim, 
num_labels=self.num_labels, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) visual_embeds = floats_tensor([self.batch_size, self.visual_seq_length, self.visual_embedding_dim]) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor([self.batch_size, self.visual_seq_length], self.type_vocab_size) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } def prepare_config_and_inputs_for_pretraining(self): masked_lm_labels = None sentence_image_labels = None if self.use_labels: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length + self.visual_seq_length], self.vocab_size) sentence_image_labels = ids_tensor( [self.batch_size], self.type_sequence_label_size, ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": masked_lm_labels, "sentence_image_labels": sentence_image_labels}) return config, input_dict def prepare_config_and_inputs_for_multiple_choice(self): input_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.vocab_size) visual_embeds = floats_tensor( [self.batch_size, self.num_choices, self.visual_seq_length, self.visual_embedding_dim] ) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones( (self.batch_size, self.num_choices, self.seq_length), dtype=torch.long, device=torch_device ) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.num_choices, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor( [self.batch_size, self.num_choices, self.visual_seq_length], self.type_vocab_size ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, "labels": labels, } def prepare_config_and_inputs_for_vqa(self): vqa_labels = None if self.use_labels: vqa_labels = floats_tensor([self.batch_size, self.num_labels]) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": vqa_labels}) return config, input_dict def prepare_config_and_inputs_for_nlvr(self): nlvr_labels = None if self.use_labels: nlvr_labels = ids_tensor([self.batch_size], self.num_labels) config, input_dict = 
self.prepare_config_and_inputs_for_common() input_dict.update({"labels": nlvr_labels}) return config, input_dict def prepare_config_and_inputs_for_flickr(self): region_to_phrase_position = torch.cat( ( ids_tensor([self.batch_size, self.seq_length], self.visual_seq_length), torch.ones(self.batch_size, self.visual_seq_length, dtype=torch.long, device=torch_device) * -1, ), dim=-1, ) flickr_labels = None if self.use_labels: flickr_labels = floats_tensor( [self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length] ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"region_to_phrase_position": region_to_phrase_position, "labels": flickr_labels}) return config, input_dict def create_and_check_model(self, config, input_dict): model = VisualBertModel(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.hidden_size), ) def create_and_check_for_pretraining(self, config, input_dict): model = VisualBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.prediction_logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.vocab_size), ) def create_and_check_for_vqa(self, config, input_dict): model = VisualBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice(self, config, input_dict): model = VisualBertForMultipleChoice(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_nlvr(self, config, input_dict): model = VisualBertForVisualReasoning(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_flickr(self, config, input_dict): model = VisualBertForRegionToPhraseAlignment(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length) ) @require_torch class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( VisualBertModel, VisualBertForMultipleChoice, VisualBertForVisualReasoning, VisualBertForRegionToPhraseAlignment, VisualBertForQuestionAnswering, VisualBertForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {} test_torchscript = False test_pruning = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VisualBertForMultipleChoice: for key in inputs_dict.keys(): value = inputs_dict[key] if isinstance(value, torch.Tensor) and value.ndim > 1: if key != "visual_embeds": inputs_dict[key] = ( inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() ) else: inputs_dict[key] = ( inputs_dict[key] .unsqueeze(1) .expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim) .contiguous() ) elif model_class == VisualBertForRegionToPhraseAlignment: 
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["region_to_phrase_position"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) if return_labels: if model_class == VisualBertForMultipleChoice: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == VisualBertForPreTraining: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["labels"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) inputs_dict["sentence_image_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) # Flickr expects float labels elif model_class == VisualBertForRegionToPhraseAlignment: batch_size = self.model_tester.batch_size total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length inputs_dict["labels"] = torch.ones( ( batch_size, total_length, self.model_tester.visual_seq_length, ), dtype=torch.float, device=torch_device, ) # VQA expects float labels elif model_class == VisualBertForQuestionAnswering: inputs_dict["labels"] = torch.ones( (self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.float, device=torch_device, ) elif model_class == VisualBertForVisualReasoning: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = VisualBertModelTester(self) self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) visual_seq_len = getattr(self.model_tester, "visual_seq_length", None) encoder_seq_length = (seq_len if seq_len is not None else 0) + ( visual_seq_len if visual_seq_len is not None else 0 ) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( 
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length + self.model_tester.visual_seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_model_for_vqa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_vqa() self.model_tester.create_and_check_for_vqa(*config_and_inputs) def test_model_for_nlvr(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs_for_nlvr()
        self.model_tester.create_and_check_for_nlvr(*config_and_inputs)

    def test_model_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_multiple_choice()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_model_for_flickr(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr()
        self.model_tester.create_and_check_for_flickr(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VisualBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass


@require_torch
class VisualBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_vqa_coco_pre(self):
        model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre")

        input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1)
        token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1)
        visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5
        visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long)
        attention_mask = torch.tensor([1] * 6).reshape(1, -1)
        visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)

        with torch.no_grad():
            output = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                visual_embeds=visual_embeds,
                visual_attention_mask=visual_attention_mask,
                visual_token_type_ids=visual_token_type_ids,
            )

        vocab_size = 30522

        expected_shape = torch.Size((1, 16, vocab_size))
        self.assertEqual(output.prediction_logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]]
        )

        self.assertTrue(torch.allclose(output.prediction_logits[:, :3, :3], expected_slice, atol=1e-4))

        expected_shape_2 = torch.Size((1, 2))
        self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2)

        expected_slice_2 = torch.tensor([[0.7393, 0.1754]])

        self.assertTrue(torch.allclose(output.seq_relationship_logits, expected_slice_2, atol=1e-4))

    @slow
    def test_inference_vqa(self):
        model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")

        input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1)
        token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1)
        visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5
        visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long)
        attention_mask = torch.tensor([1] * 6).reshape(1, -1)
        visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)

        with torch.no_grad():
            output = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 3129)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]] ) self.assertTrue(torch.allclose(output.logits[:, :10], expected_slice, atol=1e-4)) @slow def test_inference_nlvr(self): model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 1024), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 2)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-1.1436, 0.8900]]) self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4)) @slow def test_inference_vcr(self): model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr") input_ids = torch.tensor([[[1, 2, 3, 4, 5, 6] for i in range(4)]], dtype=torch.long) attention_mask = torch.ones_like(input_ids) token_type_ids = torch.ones_like(input_ids) visual_embeds = torch.ones(size=(1, 4, 10, 512), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long) visual_attention_mask = torch.ones_like(visual_token_type_ids) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 4)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]]) self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
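

# Editor's sketch (not part of the original test file): VisualBERT concatenates the
# text and visual streams, so every hidden-state length asserted above is
# seq_length + visual_seq_length (e.g. 6 text tokens + 10 visual tokens = 16 in
# test_inference_vqa_coco_pre). A minimal shape check with a tiny random config:

import torch

from transformers import VisualBertConfig, VisualBertModel


def _demo_visual_bert_stream_concat():
    config = VisualBertConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        visual_embedding_dim=20,
    )
    model = VisualBertModel(config).eval()

    input_ids = torch.randint(0, 99, (1, 7))  # 7 text tokens
    visual_embeds = torch.rand(1, 5, 20)      # 5 visual tokens of dim visual_embedding_dim
    visual_token_type_ids = torch.ones((1, 5), dtype=torch.long)
    visual_attention_mask = torch.ones((1, 5))
    with torch.no_grad():
        out = model(
            input_ids=input_ids,
            visual_embeds=visual_embeds,
            visual_token_type_ids=visual_token_type_ids,
            visual_attention_mask=visual_attention_mask,
        )
    assert out.last_hidden_state.shape == (1, 7 + 5, 32)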
transformers/tests/models/visual_bert/test_modeling_visual_bert.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VitMatteImageProcessor class VitMatteImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=0.5, do_pad=True, size_divisibility=10, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.size_divisibility = size_divisibility self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, "size_divisibility": self.size_divisibility, } def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VitMatteImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VitMatteImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = VitMatteImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size_divisibility")) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, 
np.ndarray)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.shape[:2])
        encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

        # Verify that width and height can be divided by size_divisibility
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.shape[:2])
        encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

        # Verify that width and height can be divided by size_divisibility
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0)

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.size[::-1])
        encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

        # Verify that width and height can be divided by size_divisibility
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0)

    def test_call_numpy_4_channels(self):
        # Test that the image processor can handle images with an arbitrary number of channels
        # Initialize image_processing
        image_processor = self.image_processing_class(**self.image_processor_dict)

        # create random numpy tensors
        self.image_processor_tester.num_channels = 4
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.shape[:2])
        encoded_images = image_processor(
            images=image,
            trimaps=trimap,
            input_data_format="channels_first",
            image_mean=0,
            image_std=1,
            return_tensors="pt",
        ).pixel_values

        # Verify that width and height can be divided by size_divisibility
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0)

    def test_padding(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image = np.random.randn(3, 249, 491)
        images = image_processing.pad_image(image)
        assert images.shape == (3, 256, 512)
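

# Editor's sketch (not part of the original test file): the arithmetic behind
# test_padding. pad_image rounds each spatial dimension up to the next multiple of
# the divisor (32 in that test: 249 -> 256 and 491 -> 512). `pad_to_multiple` is a
# hypothetical helper written only to make the computation explicit.

import math


def pad_to_multiple(dim: int, divisor: int = 32) -> int:
    return math.ceil(dim / divisor) * divisor


assert pad_to_multiple(249) == 256
assert pad_to_multiple(491) == 512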
transformers/tests/models/vitmatte/test_image_processing_vitmatte.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Wav2Vec2-BERT model. """ import tempfile import unittest from datasets import load_dataset from transformers import Wav2Vec2BertConfig, is_torch_available from transformers.testing_utils import ( is_pt_flax_cross_test, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoFeatureExtractor, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForCTC, Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForXVector, Wav2Vec2BertModel, ) from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import ( _compute_mask_indices, _sample_negative_indices, ) # Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTester with Conformer->Bert, input_values->input_features class Wav2Vec2BertModelTester: # Ignore copy def __init__( self, parent, batch_size=13, seq_length=200, # speech is longer is_training=False, hidden_size=16, feature_projection_input_dim=16, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=2, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, position_embeddings_type="relative", scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feature_projection_input_dim = feature_projection_input_dim self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.position_embeddings_type = position_embeddings_type self.output_seq_length = self.seq_length self.encoder_seq_length = self.output_seq_length 
self.adapter_output_seq_length = self.output_seq_length for _ in range(num_adapter_layers): self.adapter_output_seq_length = (self.adapter_output_seq_length - 1) // adapter_stride + 1 # Ignore copy def prepare_config_and_inputs(self, position_embeddings_type="relative"): input_shape = [self.batch_size, self.seq_length, self.feature_projection_input_dim] input_features = floats_tensor(input_shape, self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(position_embeddings_type=position_embeddings_type) return config, input_features, attention_mask # Ignore copy def get_config(self, position_embeddings_type="relative"): return Wav2Vec2BertConfig( hidden_size=self.hidden_size, feature_projection_input_dim=self.feature_projection_input_dim, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, position_embeddings_type=position_embeddings_type, ) def create_and_check_model(self, config, input_features, attention_mask): model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_features, attention_mask): config.add_adapter = True model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_features, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2BertForCTC(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) # Ignore copy def create_and_check_model_with_intermediate_ffn_before_adapter(self, config, input_features, attention_mask): config.add_adapter = True config.use_intermediate_ffn_before_adapter = True model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) # also try with different adapter proj dim config.output_hidden_size = 8 model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, 
self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_with_adapter_proj_dim(self, config, input_features, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_float16(self, config, input_features, attention_mask): model = Wav2Vec2BertModel(config=config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = Wav2Vec2BertModel.from_pretrained(tmpdirname, torch_dtype=torch.float16) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_features.type(dtype=torch.float16), attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_features, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() input_features = input_features[:3] attention_mask = torch.ones(input_features.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_features, attention_mask=attention_mask).last_hidden_state for i in range(input_features.shape[0]): input_slice = input_features[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_features, *args): model = Wav2Vec2BertForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_features = input_features[:3] # Ignore copy attention_mask = torch.ones(input_features.shape[:2], device=torch_device, dtype=torch.long) input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_features.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_features, *args): model = Wav2Vec2BertForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_features = input_features[:3] # Ignore copy attention_mask = torch.ones(input_features.shape[:2], device=torch_device, dtype=torch.long) input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] labels = ids_tensor((input_features.shape[0], 1), 
len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_features, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_features, *args): config.ctc_zero_infinity = True model = Wav2Vec2BertForCTC(config=config) model.to(torch_device) model.train() # Ignore copy input_features = input_features[:3] input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_features.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_features, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_features, *args): config.ctc_zero_infinity = True model = Wav2Vec2BertForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_features = input_features[:3] # Ignore copy input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 loss = model(input_features, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_features, *args): config.ctc_zero_infinity = True model = Wav2Vec2BertForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_features = input_features[:3] input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 loss = model(input_features, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_features, *args): model = Wav2Vec2BertForCTC(config) model.to(torch_device) model.train() input_features = input_features[:3] input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_features.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_features, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_features, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_features": input_features, "attention_mask": attention_mask} return config, inputs_dict @require_torch # Copied from 
tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTest with Conformer->Bert, input_values->input_features class Wav2Vec2BertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): # Ignore copy all_model_classes = ( ( Wav2Vec2BertForCTC, Wav2Vec2BertModel, Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2BertForSequenceClassification, "automatic-speech-recognition": Wav2Vec2BertForCTC, "feature-extraction": Wav2Vec2BertModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2BertModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) # Ignore copy def test_model_with_relative_key(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative_key") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_no_rel_pos(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type=None) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) # Ignore copy def test_model_with_intermediate_ffn_before_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_intermediate_ffn_before_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model_float16(*config_and_inputs) # Ignore copy @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative_key(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative_key") self.model_tester.create_and_check_model_float16(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model_float16(*config_and_inputs) def test_ctc_loss_inference(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Ignore copy @unittest.skip(reason="Wav2Vec2Bert has no inputs_embeds") def test_inputs_embeds(self): pass # Ignore copy @unittest.skip(reason="`input_ids` is renamed to `input_features`") def test_forward_signature(self): pass # Ignore copy @unittest.skip(reason="Wav2Vec2Bert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass # Ignore copy @unittest.skip(reason="Wav2Vec2Bert has no inputs_embeds") def test_model_common_attributes(self): pass # Ignore copy @unittest.skip(reason="non-robust architecture does not exist in Flax") @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): pass # Ignore copy @unittest.skip(reason="non-robust architecture does not exist in Flax") @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_features = inputs_dict["input_features"] input_lengths = torch.tensor( [input_features.shape[1] for _ in range(input_features.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_features.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", 
"feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "pos_bias_u") and module.pos_bias_u is not None: module.pos_bias_u.data.fill_(3) if hasattr(module, "pos_bias_v") and module.pos_bias_v is not None: module.pos_bias_v.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) # Ignore copy @unittest.skip(reason="Kept to make #Copied from working") def test_mask_feature_prob_ctc(self): pass # Ignore copy @unittest.skip(reason="Kept to make #Copied from working") def test_mask_time_prob_ctc(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): # Ignore copy model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0") self.assertIsNotNone(model) @require_torch # Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerUtilsTest with Conformer->Bert, input_values->input_features class Wav2Vec2BertUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = 
torch.from_numpy(mask).to(torch_device)

        # because of overlap, masks don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

    def test_compute_mask_indices_attn_mask_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        attention_mask[:2, sequence_length // 2 :] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
        )
        mask = torch.from_numpy(mask).to(torch_device)

        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

        self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)

    def test_compute_mask_indices_short_audio(self):
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        # force one example to be heavily padded
        attention_mask[0, 5:] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2
        )

        # make sure that the heavily padded example, which is too short for a full mask span, is not masked
        self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any())

    # Ignore copy
    @unittest.skip(reason="Kept to make #Copied from working. Test a class used for pretraining, not yet supported.")
    def test_compute_perplexity(self):
        pass

    def test_sample_negatives(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3

        features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
            sequence_length, hidden_size
        )  # each value in vector consists of same value
        features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()

        # sample negative indices
        sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None)
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
        negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
        negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)
        self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))

        # make sure no negatively sampled vector is actually a positive one
        for negative in negatives:
            self.assertTrue(((negative - features) == 0).sum() == 0.0)

        # make sure that full vectors are sampled, and not just single values of vectors
        # => this means that `unique()` yields a single value for the `hidden_size` dim
        self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1))

    def test_sample_negatives_with_mask(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3

        # second half of last input tensor is padded
        mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        mask[-1, sequence_length // 2 :] = 0

        features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
            sequence_length, hidden_size
        )  # each value in vector consists of same value
        features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()

        # replace masked feature vectors with -100 to test that those are not sampled
        features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100)

        # sample negative indices
        sampled_negative_indices = _sample_negative_indices(
            (batch_size, sequence_length), num_negatives, mask.cpu().numpy()
        )
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
        negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
        negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)

        self.assertTrue((negatives >= 0).all().item())

        self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))

        # make sure no negatively sampled vector is actually a positive one
        for negative in negatives:
            self.assertTrue(((negative - features) == 0).sum() == 0.0)

        # make sure that full vectors are sampled, and not just single values of vectors
        # => this means that `unique()` yields a single value for the `hidden_size` dim
        self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1))


@require_torch
@slow
class Wav2Vec2BertModelIntegrationTest(unittest.TestCase):
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").filter(lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)])
        speech_samples = speech_samples[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_inference_w2v2_bert(self):
        model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0")
        model.to(torch_device)
        feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")

        input_speech = self._load_datasamples(2)

        inputs = feature_extractor(input_speech, return_tensors="pt", padding=True).to(torch_device)

        model.eval()
        with torch.no_grad():
            outputs = model(**inputs, output_attentions=True)

        # fmt: off
        expected_slice_0 = torch.tensor(
            [[-0.0098, -0.0570, -0.1286, 0.0439, -0.1037, -0.0235],
             [-0.0767, 0.0574, -0.3224, 0.0482, 0.0440, -0.0193],
             [ 0.0220, -0.0878, -0.2027, -0.0028, -0.0666, 0.0721],
             [ 0.0307, -0.1099, 0.0273, -0.0416, -0.0715, 0.0094],
             [ 0.0758, -0.0291, 0.1084, 0.0004, -0.0751, -0.0116],
             [ 0.0349, -0.0343, -0.0098, 0.0415, -0.0617, 0.0241],
             [-0.0193, -0.0171, 0.1965, 0.0797, -0.0308, 0.2033],
             [-0.0323, -0.0315, 0.0948, 0.0944, -0.0254, 0.1241],
             [-0.0493, 0.0010, -0.1762, 0.0034, -0.0787, 0.0832],
             [ 0.0043, -0.1228, -0.0739, 0.0266, -0.0337, -0.0068]]
        ).to(torch_device)
        # fmt: on

        # fmt: off
        expected_slice_1 = torch.tensor(
            [[-0.0348, -0.0521, -0.3036, 0.0285, -0.0715, -0.0453],
             [-0.0102, 0.0114, -0.3266, 0.0027, -0.0558, 0.0038],
             [ 0.0454, 0.0148, -0.2418, -0.0392, -0.0455, 0.0478],
             [-0.0013, 0.0825, -0.1730, -0.0091, -0.0426, 0.0360],
             [-0.0227, 0.0687, -0.1168, 0.0569, -0.0160, 0.0759],
             [-0.0318, 0.0562, -0.0508, 0.0605, 0.0150, 0.0953],
             [-0.0415, 0.0438, 0.0233, 0.0336, 0.0262, 0.0860],
             [-0.0163, 0.0048, 0.0807, 0.0119, 0.0712, 0.0158],
             [ 0.0244, -0.0145, 0.0262, -0.0237, 0.0283, -0.0125],
             [-0.0587, -0.0516, -0.0368, -0.0196, 0.0307, -0.1434]]
        ).to(torch_device)
        # fmt: on

        self.assertTrue((outputs.last_hidden_state[0, 25:35, 4:10] - expected_slice_0).abs().max() <= 1e-4)
        self.assertTrue((outputs.last_hidden_state[1, 25:35, 4:10] - expected_slice_1).abs().max() <= 1e-4)

        self.assertAlmostEqual(outputs.last_hidden_state[1].mean().item(), 3.3123e-05)
        self.assertAlmostEqual(outputs.last_hidden_state[1].std().item(), 0.1545, delta=2e-5)

        self.assertListEqual(list(outputs.last_hidden_state.shape), [2, 326, 1024])
transformers/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py/0
{ "file_path": "transformers/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py", "repo_id": "transformers", "token_count": 17010 }
390
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase): @slow def test_flax_xlm_roberta_base(self): model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base") tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base") text = "The dog is cute and lives in the garden house" input_ids = jnp.array([tokenizer.encode(text)]) expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) output = model(input_ids)["last_hidden_state"] self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
transformers/tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py/0
{ "file_path": "transformers/tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "repo_id": "transformers", "token_count": 684 }
391
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch YOSO model. """ import unittest from transformers import YosoConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, YosoModel, ) from transformers.models.yoso.modeling_yoso import YOSO_PRETRAINED_MODEL_ARCHIVE_LIST class YosoModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return YosoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = YosoModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = YosoForSequenceClassification(config) model.to(torch_device) model.eval() 
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = YosoForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = YosoForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class YosoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( YosoModel, YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": YosoModel, "fill-mask": YosoForMaskedLM, "question-answering": YosoForQuestionAnswering, "text-classification": YosoForSequenceClassification, "token-classification": YosoForTokenClassification, "zero-shot": YosoForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = YosoModelTester(self) self.config_tester = ConfigTester(self, config_class=YosoConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in YOSO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = YosoModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): return @require_torch class YosoModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = YosoModel.from_pretrained("uw-madison/yoso-4096") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0611, 0.1242, 0.0840], [0.0280, -0.0048, 0.1125], [0.0106, 0.0226, 0.0751]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_masked_lm(self): model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 50265 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.1313, -3.7285, -2.2407], [-2.7047, -3.3314, -2.6408], [0.0629, -2.5166, -0.3356]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_masked_lm_long_input(self): model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096") input_ids = torch.arange(4096).unsqueeze(0) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 50265 expected_shape = torch.Size((1, 4096, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.3914, -4.3742, -5.0956], [-4.0988, -4.2384, -7.0406], [-3.1427, -3.7192, -6.6800]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/yoso/test_modeling_yoso.py/0
{ "file_path": "transformers/tests/models/yoso/test_modeling_yoso.py", "repo_id": "transformers", "token_count": 7199 }
392
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, AutoImageProcessor, AutoModelForImageToImage, ImageToImagePipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class ImageToImagePipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] @require_torch @require_vision @slow def test_pipeline(self): model_id = "caidas/swin2SR-classical-sr-x2-64" upscaler = pipeline("image-to-image", model=model_id) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976)) @require_torch @require_vision @slow def test_pipeline_model_processor(self): model_id = "caidas/swin2SR-classical-sr-x2-64" model = AutoModelForImageToImage.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) upscaler = ImageToImagePipeline(model=model, image_processor=image_processor) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976))
transformers/tests/pipelines/test_pipelines_image_to_image.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_to_image.py", "repo_id": "transformers", "token_count": 1057 }
393
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class ZeroShotAudioClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLAP would be there for now. # model_mapping = {CLAPConfig: CLAPModel} @require_torch def test_small_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" ) dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], ) @unittest.skip("No models are available in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", ) # This is an audio of a dog dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ) output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) output = audio_classifier( [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 ) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) @unittest.skip("No models are available in TF") def test_large_model_tf(self): pass
transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py", "repo_id": "transformers", "token_count": 1482 }
394
# Testing new Hugging Face Deep Learning Container.

This document explains the testing strategy for releasing the new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly on Monday. The code cutoff date for any changes is the Wednesday before release-Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate of the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC-tag.

```
git+https://github.com/huggingface/[email protected] # install main or adjust it with vX.X.X to install a specific version of transformers
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`.

To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`.
```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
    datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment, describe which tests we ran and with which package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see what information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**
1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x-x.yml`, where `x-x` is the base framework version, e.g. if PyTorch 1.6.0 is the latest version in `buildspec.yaml` the file should be renamed to `buildspec-1-6.yml`.

To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`.

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.4.2
    datasets_version: &DATASETS_VERSION 1.5.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment, describe which tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see what information is needed.
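Both test cases above go through the same `make test-sagemaker` entry point. While iterating on the `requirements.txt` changes, it can be convenient to re-run only a subset of the suite instead of the full target. A minimal sketch of such a shortcut is shown below; it assumes the tests live under `tests/sagemaker`, and the `-k` expression is illustrative, so adjust it to the actual test names:

```python
# Hypothetical convenience wrapper; `make test-sagemaker` remains the canonical entry point.
import os

import pytest

# Placeholder for the profile holding your HF SageMaker AWS credentials.
os.environ.setdefault("AWS_PROFILE", "<enter-your-profile>")

# "-s" streams the SageMaker job logs to the console; "-k" narrows the run
# to tests whose names match the (illustrative) expression.
raise SystemExit(pytest.main(["-s", "tests/sagemaker", "-k", "single"]))
```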
## Current Tests

| ID | Description | Platform | #GPUS | Collected & evaluated metrics |
|-------------------------------------|-----------------------------------------------------------------------------|-----------------------------|-------|------------------------------------------|
| pytorch-transfromers-test-single | test BERT fine-tuning using BERT from the transformers lib + PT | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-ddp | test BERT fine-tuning using BERT from the transformers lib + PT DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-smd | test BERT fine-tuning using BERT from the transformers lib + PT SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-1-smp | test RoBERTa fine-tuning using RoBERTa from the transformers lib + PT SM MP | SageMaker createTrainingJob | 8 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-single | test BERT fine-tuning using BERT from the transformers lib + TF | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-2-smd | test BERT fine-tuning using BERT from the transformers lib + TF SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
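All of the jobs in the table are submitted through `SageMaker createTrainingJob`. For orientation, here is a minimal sketch of how such a job can be launched with the SageMaker Python SDK's Hugging Face estimator; the entry point, source directory, role, instance type, versions and hyperparameters are placeholders rather than the exact values used by the suite:

```python
from sagemaker.huggingface import HuggingFace

# All values below are placeholders; the real tests derive them from their own configs.
estimator = HuggingFace(
    entry_point="run_glue.py",  # training script executed inside the DLC
    source_dir="./tests/sagemaker/scripts/pytorch",
    role="<your-sagemaker-execution-role-arn>",
    instance_type="ml.p3.2xlarge",
    instance_count=1,
    transformers_version="4.4.2",
    pytorch_version="1.6.0",
    py_version="py36",
    hyperparameters={"model_name_or_path": "distilbert-base-uncased", "epochs": 1},
)

# Launches the training job; metrics such as train_runtime are collected from its output.
estimator.fit()
```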
transformers/tests/sagemaker/README.md/0
{ "file_path": "transformers/tests/sagemaker/README.md", "repo_id": "transformers", "token_count": 3293 }
395
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class FeatureExtractionSavingTestMixin: test_cast_dtype = None def test_feat_extract_to_json_string(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) obj = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key], value) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_init_without_params(self): feat_extract = self.feature_extraction_class() self.assertIsNotNone(feat_extract)
transformers/tests/test_feature_extraction_common.py/0
{ "file_path": "transformers/tests/test_feature_extraction_common.py", "repo_id": "transformers", "token_count": 828 }
396
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import load_tool
from transformers.tools.agent_types import AGENT_TYPE_MAPPING

from .test_tools_common import ToolTesterMixin, output_types


class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("translation")
        self.tool.setup()
        self.remote_tool = load_tool("translation", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("Hey, what's up?", src_lang="English", tgt_lang="French")
        self.assertEqual(result, "- Hé, comment ça va?")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("Hey, what's up?", src_lang="English", tgt_lang="French")
        self.assertEqual(result, "- Hé, comment ça va?")

    def test_exact_match_kwarg(self):
        result = self.tool(text="Hey, what's up?", src_lang="English", tgt_lang="French")
        self.assertEqual(result, "- Hé, comment ça va?")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="Hey, what's up?", src_lang="English", tgt_lang="French")
        self.assertEqual(result, "- Hé, comment ça va?")

    def test_call(self):
        inputs = ["Hey, what's up?", "English", "Spanish"]
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_agent_types_outputs(self):
        inputs = ["Hey, what's up?", "English", "Spanish"]
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = ["Hey, what's up?", "English", "Spanish"]

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
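Outside the test harness, the tool under test can be exercised directly. A minimal sketch, grounded in the same load_tool / setup / call pattern the exact-match tests above use, assuming a local (non-remote) setup:

from transformers import load_tool

# Load and initialize the translation tool, then call it the same way the
# exact-match tests above do.
translator = load_tool("translation")
translator.setup()
print(translator("Hey, what's up?", src_lang="English", tgt_lang="French"))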
transformers/tests/tools/test_translation.py/0
{ "file_path": "transformers/tests/tools/test_translation.py", "repo_id": "transformers", "token_count": 1249 }
397
import unittest
import warnings
from dataclasses import dataclass

from transformers.convert_slow_tokenizer import SpmConverter
from transformers.testing_utils import get_tests_dir


@dataclass
class FakeOriginalTokenizer:
    vocab_file: str


class ConvertSlowTokenizerTest(unittest.TestCase):
    def test_spm_converter_bytefallback_warning(self):
        spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model")
        spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")

        original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback)

        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_without_bytefallback)
        self.assertEqual(len(w), 0)

        original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback)

        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_with_bytefallback)
        self.assertEqual(len(w), 1)

        self.assertIn(
            "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
            " which is not implemented in the fast tokenizers.",
            str(w[0].message),
        )
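For reference, SpmConverter is one of the converters used by transformers.convert_slow_tokenizer.convert_slow_tokenizer, the usual entry point for turning a SentencePiece-based slow tokenizer into a fast backend. A minimal sketch of that entry point; the albert-base-v2 checkpoint is an illustrative choice, not something this test depends on:

from transformers import AlbertTokenizer
from transformers.convert_slow_tokenizer import convert_slow_tokenizer

# Convert a SentencePiece-based slow tokenizer into a `tokenizers.Tokenizer`
# backend; this conversion is where the byte-fallback warning tested above
# would be emitted for models that use that option.
slow_tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
fast_backend = convert_slow_tokenizer(slow_tokenizer)
print(fast_backend.encode("Hello world").tokens)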
transformers/tests/utils/test_convert_slow_tokenizer.py/0
{ "file_path": "transformers/tests/utils/test_convert_slow_tokenizer.py", "repo_id": "transformers", "token_count": 524 }
398
{ "ASTForAudioClassification": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTForAudioClassification" ], "sha": "83d6e076db7768a3645401bad3204624985e1d08" }, "ASTModel": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTModel" ], "sha": "75e68f956f6f2c0709b01e596e7a6aecb1b29dce" }, "AlbertForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMaskedLM", "TFAlbertForMaskedLM" ], "sha": "d29de71ac29e1019c3a7762f7357f750730cb037" }, "AlbertForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMultipleChoice", "TFAlbertForMultipleChoice" ], "sha": "242aecce6a589a2964c0f695621fa22a83751579" }, "AlbertForPreTraining": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForPreTraining", "TFAlbertForPreTraining" ], "sha": "41330be4b271687f4d88ddc96346c12aa11de983" }, "AlbertForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForQuestionAnswering", "TFAlbertForQuestionAnswering" ], "sha": "040b81c15f437f4722349dc5b41fccd17ebd7fdc" }, "AlbertForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForSequenceClassification", "TFAlbertForSequenceClassification" ], "sha": "39c1a0e2c1c2623106d3211d751e9b32f23a91a0" }, "AlbertForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForTokenClassification", "TFAlbertForTokenClassification" ], "sha": "359c3f4a311a4053a6f6d6a880db5f82c8e3ff1f" }, "AlbertModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertModel", "TFAlbertModel" ], "sha": "34a63314686b64aaeb595ddb95006f1ff2ffda17" }, "AlignModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "AlignModel" ], "sha": "68a4f9d3f493f44efa7c1dde6fcca23350e2c92b" }, "AltCLIPModel": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "AltCLIPModel" ], "sha": "3106af0fd503970717c05f27218e5cacf19ba872" }, "BarkModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BarkModel" ], "sha": "187e590fd87359cea47693e8cb11a604cd7b673c" }, "BartForCausalLM": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForCausalLM" ], "sha": "c25526ac67d2dbe79fe5462af4b7908ca2fbc3ff" }, "BartForConditionalGeneration": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForConditionalGeneration", "TFBartForConditionalGeneration" ], "sha": "3a489a21e4b04705f4a6047924b7616a67be7e37" }, "BartForQuestionAnswering": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForQuestionAnswering" ], "sha": "3ebf9aab39a57ceab55128d5fc6f61e4db0dadd4" }, "BartForSequenceClassification": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "BartForSequenceClassification", "TFBartForSequenceClassification" ], "sha": "ea452fd9a928cfebd71723afa50feb20326917bc" }, "BartModel": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartModel", "TFBartModel" ], "sha": "e5df6d1aa75f03833b2df328b9c35463f73a421b" }, "BeitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForImageClassification" ], "sha": "e997587bb890f82faad4bd25eb23d85ba21ecaaa" }, "BeitForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForSemanticSegmentation" ], "sha": "d4afa9e21e3fe5b087578ed68974d9b3ffc1fb22" }, "BeitModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitModel" ], "sha": "5c4a051f0cca6f64d02c6168deb88413cae10d2c" }, "BertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMaskedLM", "TFBertForMaskedLM" ], "sha": "3e32baa52ce044c75edfb5c28abd51ee8d051282" }, "BertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMultipleChoice", "TFBertForMultipleChoice" ], "sha": "0b8c3a6d411d1e19e5fd98d4d8631ae7616eeeaa" }, "BertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForNextSentencePrediction", "TFBertForNextSentencePrediction" ], "sha": "628e70debf8864bd0b63aff7901d17d9c4f7612c" }, "BertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForPreTraining", "TFBertForPreTraining" ], "sha": "c748ad37e6a200a6f64b2764191bfe13f976032f" }, "BertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForQuestionAnswering", "TFBertForQuestionAnswering" ], "sha": "4671ad0c21493b97c5eb2f0201192704c29876d5" }, "BertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForSequenceClassification", "TFBertForSequenceClassification" ], "sha": "37a9d44022264c12bdf3ec257778f953b63d4aaf" }, "BertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForTokenClassification", "TFBertForTokenClassification" ], "sha": "d7dc3a0793ff6dfcb794b21130ee0f185d2c61a2" }, "BertLMHeadModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertLMHeadModel", "TFBertLMHeadModel" ], "sha": "b4e3acc1990f3e365ffddbd54b620a26d9fb4b09" }, "BertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertModel", "TFBertModel" ], "sha": "3956d303d3cddf0708ff20660c1ea5f6ec30e434" }, "BigBirdForCausalLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForCausalLM" ], "sha": "5c7a487af5248d9c01b45d5481b7d7bb9b36e1b5" }, "BigBirdForMaskedLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMaskedLM" ], "sha": "476ef8225c0f69270b577706ad4f1dda13e4dde5" }, "BigBirdForMultipleChoice": 
{ "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMultipleChoice" ], "sha": "cf93eaa1019987112c171a407745bc183a20513a" }, "BigBirdForPreTraining": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForPreTraining" ], "sha": "5fb9efa13334431e7c186a9fa314b89c4a1eee72" }, "BigBirdForQuestionAnswering": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForQuestionAnswering" ], "sha": "f82f88bd71fba819a8ffb0692915d3529e705417" }, "BigBirdForSequenceClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForSequenceClassification" ], "sha": "ea398090858f9af93b54fc9a8d65cfed78ac27ff" }, "BigBirdForTokenClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForTokenClassification" ], "sha": "2cdea118999fa58ba9fb0162d99e2ffa146c3df1" }, "BigBirdModel": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdModel" ], "sha": "9c55989f31df156194e6997606fb14d9897e0300" }, "BigBirdPegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForCausalLM" ], "sha": "49bc8816c666dee32e27cd8e00136b604eb85243" }, "BigBirdPegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForConditionalGeneration" ], "sha": "e791aa6d1af5a76ca0926d95b1f28bd2d8adf376" }, "BigBirdPegasusForQuestionAnswering": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForQuestionAnswering" ], "sha": "7650e076713ca707a37062adc8c9c1cd60dad7c7" }, "BigBirdPegasusForSequenceClassification": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForSequenceClassification" ], "sha": "02500e8ebd9c53528750013fb963fbdc2be34034" }, "BigBirdPegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusModel" ], "sha": "b07c5304dfba673cf8b9cf5cd1aa45fbfea1c2f3" }, "BioGptForCausalLM": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForCausalLM" ], "sha": "07073b31da84054fd12226e3cae4cb3beb2547f9" }, "BioGptForSequenceClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForSequenceClassification" ], "sha": "8e18ad6218abd795e050dec324a8c827ccedacb4" }, "BioGptForTokenClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForTokenClassification" ], "sha": "67f8173c1a17273064d452a9031a51b67f327b6a" }, "BioGptModel": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptModel" ], "sha": "fe18551d0743538a990520b75707294ec57b4ebe" }, "BitBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitBackbone" ], "sha": "2f06f6b4395b6dce2b00ac839ff757410e743cd7" }, "BitForImageClassification": { "tokenizer_classes": [], 
"processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitForImageClassification" ], "sha": "d0d8476f2d285ddda7c42c0d4a8e4bf6f5d2bfdf" }, "BitModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitModel" ], "sha": "30a8a9b1a6b253cc500c01cf41bc1fc9581ea5e5" }, "BlenderbotForCausalLM": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForCausalLM" ], "sha": "8aad2e13e8920bca3cf988ba45f8a7b008b51a81" }, "BlenderbotForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForConditionalGeneration", "TFBlenderbotForConditionalGeneration" ], "sha": "e8532878b9924fa02fb4b059b7f6e7fa372fff91" }, "BlenderbotModel": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotModel", "TFBlenderbotModel" ], "sha": "ff848a40c30ca98eb7c6870bbb02677d5af9db55" }, "BlenderbotSmallForCausalLM": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForCausalLM" ], "sha": "4c57c106630932eb9de4d76210a540d04616304d" }, "BlenderbotSmallForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallForConditionalGeneration" ], "sha": "b8db01fcf3e37a5b369cd50e169bf383b8e905d8" }, "BlenderbotSmallModel": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallModel", "TFBlenderbotSmallModel" ], "sha": "0a10c70e225ec63278faffa8fabf759f063f0e55" }, "Blip2ForConditionalGeneration": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2ForConditionalGeneration" ], "sha": "35e1ef43da3554af62eb29a7b3dbbef3f3bef48e" }, "Blip2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2Model" ], "sha": "c23378f225be31872fff33c103cf0ebc2454ffcc" }, "BlipForConditionalGeneration": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipForConditionalGeneration", "TFBlipForConditionalGeneration" ], "sha": "eaf32bc0369349deef0c777442fc185119171d1f" }, "BlipModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipModel", "TFBlipModel" ], "sha": "3d1d1c15eff22d6b2664a2d15757fa6f5d93827d" }, "BloomForCausalLM": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForCausalLM" ], "sha": "0f4f06f162cd67d34d03ee156484e4001d468500" }, "BloomForQuestionAnswering": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForQuestionAnswering" ], "sha": "23f369f163eef8c9c9685900440b0cbb0f3439fd" }, "BloomForSequenceClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForSequenceClassification" ], "sha": "b2280eef7172835f39b265eb0c46623257f67bbe" }, "BloomForTokenClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForTokenClassification" ], "sha": 
"9796aa45f99adff987c978089e11c0bd9d7b997f" }, "BloomModel": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomModel" ], "sha": "28b600fcfdc4f4938406fb518abf895620048cb2" }, "BrosForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosForTokenClassification" ], "sha": "4ec2c91936f96b93667e8946fc7abbdeeb08a6d7" }, "BrosModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosModel" ], "sha": "e2464830b1874eeaf9f4b425fbe0ce8e7c7643e9" }, "CLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "CLIPModel", "TFCLIPModel" ], "sha": "0452d344074485d0e7eb5d5c12447b7c9dbc9619" }, "CLIPSegModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "CLIPSegModel" ], "sha": "7b1305214ccc85d29b776ffbee06748693852a04" }, "CTRLForSequenceClassification": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLForSequenceClassification", "TFCTRLForSequenceClassification" ], "sha": "280b5a3502d607c55c9f8d9f198fe9c2802d6f73" }, "CTRLLMHeadModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLLMHeadModel", "TFCTRLLMHeadModel" ], "sha": "662381663b216f1dd3c9cd30e2e83cb4c6fc9552" }, "CTRLModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLModel", "TFCTRLModel" ], "sha": "68b19b4f132d5a191a73acd78d983cbdcf068e9c" }, "CanineForMultipleChoice": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForMultipleChoice" ], "sha": "fa0451453ed202f903ff7dcf6071aab6630fb89f" }, "CanineForQuestionAnswering": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForQuestionAnswering" ], "sha": "5e1012bb086ac2e0b1497eeb7ed14eb2183d4ecb" }, "CanineForSequenceClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForSequenceClassification" ], "sha": "75336dc9179153869c38a8047ce4b1e02677a260" }, "CanineForTokenClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForTokenClassification" ], "sha": "65a622ea8e12597e12f45e59d46d8dbe8461fc10" }, "CanineModel": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineModel" ], "sha": "531ef67ad4f0b3dc7a9e5d722c774096b7401b1b" }, "ChineseCLIPModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ChineseCLIPImageProcessor" ], "model_classes": [ "ChineseCLIPModel" ], "sha": "504271a3c5fd9c2e877f5b4c01848bc18778c7c3" }, "ClapModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [ "ClapFeatureExtractor" ], "model_classes": [ "ClapModel" ], "sha": "a7874595b900f9b2ddc79130dafc3ff48f4fbfb9" }, "ClvpModelForConditionalGeneration": { "tokenizer_classes": [ "ClvpTokenizer" ], "processor_classes": [ "ClvpFeatureExtractor" ], "model_classes": [], "sha": "45df7581535be337ff781707b6c20994ca221f05" }, "CodeGenForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenForCausalLM" ], "sha": 
"a3fc69d757fd1f0aa01bcbc4337f586651c7cb10" }, "CodeGenModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenModel" ], "sha": "dad4941a2b7429fc6e8206fcc4a04fc40f4a0beb" }, "ConditionalDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrForObjectDetection" ], "sha": "762c213a0285edc84eb813a2ed90063cf971ca43" }, "ConditionalDetrModel": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrModel" ], "sha": "18b75874158cac520c63605293b06e0b1327c263" }, "ConvBertForMaskedLM": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMaskedLM", "TFConvBertForMaskedLM" ], "sha": "307c70e32c3d3c18aeb45e0cbdc9fcd2957d9aba" }, "ConvBertForMultipleChoice": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMultipleChoice", "TFConvBertForMultipleChoice" ], "sha": "d6561a21ffdb82d03c1822af0510eb7482ce5026" }, "ConvBertForQuestionAnswering": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForQuestionAnswering", "TFConvBertForQuestionAnswering" ], "sha": "8a056da5cc421415c2a24b9f644dd95ca279411d" }, "ConvBertForSequenceClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForSequenceClassification", "TFConvBertForSequenceClassification" ], "sha": "8bb8b20e51d282d777cc567cacadd97a35f0811e" }, "ConvBertForTokenClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForTokenClassification", "TFConvBertForTokenClassification" ], "sha": "8db0dd3c2b8ccc958fa9a84801f4f837b42fcf2c" }, "ConvBertModel": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertModel", "TFConvBertModel" ], "sha": "c9c5b1a74f0e468d8467473cabeaa67fcdbaddb7" }, "ConvNextBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextBackbone" ], "sha": "499c7d6a97825b79e19663b70f3b60c4813b6bf2" }, "ConvNextForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextForImageClassification", "TFConvNextForImageClassification" ], "sha": "0b490fd6b19cdbf721025dbd6ee45dcc5828e6e3" }, "ConvNextModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextModel", "TFConvNextModel" ], "sha": "7b3b47a57b9a9120e022b91d6067daeac55b794f" }, "ConvNextV2Backbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Backbone" ], "sha": "c82fc526949dfd892a1fee3c34be6f8d80c4d3df" }, "ConvNextV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2ForImageClassification", "TFConvNextV2ForImageClassification" ], "sha": "ee22bae1cbb87d66fc7f62f7e15a43d6ff80d3cc" }, "ConvNextV2Model": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Model", "TFConvNextV2Model" ], "sha": 
"c4dd68ee1102cba05bcc483da2a88e39427b7249" }, "CvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtForImageClassification", "TFCvtForImageClassification" ], "sha": "4b1938e252fdb26a06c1f5755e07fa8f6eed2d75" }, "CvtModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtModel", "TFCvtModel" ], "sha": "27fed12c174f4f4f1fe27075d1c29602fe0669f0" }, "DPRQuestionEncoder": { "tokenizer_classes": [ "DPRQuestionEncoderTokenizer", "DPRQuestionEncoderTokenizerFast" ], "processor_classes": [], "model_classes": [ "DPRQuestionEncoder", "TFDPRQuestionEncoder" ], "sha": "09ae0269780271e0a4916f7bab1dbc4f8a76070d" }, "DPTForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForDepthEstimation" ], "sha": "11b7735d64d95b6599811631b012d2dec6eaa2c1" }, "DPTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForSemanticSegmentation" ], "sha": "e140c3c716a4bf11dad875e5f5f0abd2bd4cbbcb" }, "DPTModel": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTModel" ], "sha": "1d6ae6c0b60868dffbef0dddeda381c51c6dcba5" }, "Data2VecAudioForAudioFrameClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForAudioFrameClassification" ], "sha": "a64828b27e73fc8dd95aeb315108ca2f6a66b55f" }, "Data2VecAudioForCTC": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForCTC" ], "sha": "bb161b6a181bd2c22cf30222f46fa6ef42225744" }, "Data2VecAudioForSequenceClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForSequenceClassification" ], "sha": "8de17e0a959eca5f72b2ea59a11bc1fa744785d9" }, "Data2VecAudioForXVector": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForXVector" ], "sha": "dcb92484cf28fb4fe1dcf5d6e8d78e04382fdce9" }, "Data2VecAudioModel": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioModel" ], "sha": "73f503fdff73b7616154f64dbe38a685cc48e8eb" }, "Data2VecTextForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForCausalLM" ], "sha": "1f3658ce623653338cd31516551e8181aa08bb38" }, "Data2VecTextForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMaskedLM" ], "sha": "fb41ac30d0faa0899bf5afaa0986df8993395ca6" }, "Data2VecTextForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMultipleChoice" ], "sha": "e7556d520ad90ebae5ad88554d45a37488d00040" }, "Data2VecTextForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForQuestionAnswering" ], "sha": "9630833d76a1fd7e96b904d87bb11b7c00ccd021" }, "Data2VecTextForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForSequenceClassification" ], "sha": 
"156e4019c37d9592f193ba80553cd245cbccecb3" }, "Data2VecTextForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForTokenClassification" ], "sha": "55b3a49fdbf22479d6eb939261d4b884ea288270" }, "Data2VecTextModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextModel" ], "sha": "c21be3e4f88e8357bf33bfba8f8e05ae2e735124" }, "Data2VecVisionForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForImageClassification", "TFData2VecVisionForImageClassification" ], "sha": "d640e7ced7a3fbbb8c8661a4f67b934e55406172" }, "Data2VecVisionForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForSemanticSegmentation", "TFData2VecVisionForSemanticSegmentation" ], "sha": "3eba3cd694fab6530b7e5da8f49d3951301c816a" }, "Data2VecVisionModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionModel", "TFData2VecVisionModel" ], "sha": "2a7ad25e4359970dc70494a2f3eb98e2a3c9806d" }, "DebertaForMaskedLM": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForMaskedLM", "TFDebertaForMaskedLM" ], "sha": "e0f9ada9e0f6d4d7cc39d7cbd58369b0c84de33d" }, "DebertaForQuestionAnswering": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForQuestionAnswering", "TFDebertaForQuestionAnswering" ], "sha": "a3eb69cdb0b52f7d0fb730e882f1a54b9a7442ea" }, "DebertaForSequenceClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForSequenceClassification", "TFDebertaForSequenceClassification" ], "sha": "32af91d12c4e9b6d62b420bee93311fd77d3c933" }, "DebertaForTokenClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForTokenClassification", "TFDebertaForTokenClassification" ], "sha": "ba62ba2726d813e60e512476fc1b178aa3858175" }, "DebertaModel": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaModel", "TFDebertaModel" ], "sha": "4273294e14cd04c0e2cd1dcff5cf7e5d4fe906ba" }, "DebertaV2ForMaskedLM": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMaskedLM", "TFDebertaV2ForMaskedLM" ], "sha": "a053dedc2cdf32918a84277cb0c05186604496a5" }, "DebertaV2ForMultipleChoice": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMultipleChoice", "TFDebertaV2ForMultipleChoice" ], "sha": "07e39f520ce239b39ef8cb24cd7874d06c791063" }, "DebertaV2ForQuestionAnswering": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForQuestionAnswering", "TFDebertaV2ForQuestionAnswering" ], "sha": "9cecb3a7fc6b95099122283644ea1f8ced287d1b" }, "DebertaV2ForSequenceClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForSequenceClassification", 
"TFDebertaV2ForSequenceClassification" ], "sha": "df9ea1f5c0f2ccd139b21cfb3963a5a5ebfb5b81" }, "DebertaV2ForTokenClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForTokenClassification", "TFDebertaV2ForTokenClassification" ], "sha": "51fe01989df38a540ac1abca5ee71a51365defd5" }, "DebertaV2Model": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2Model", "TFDebertaV2Model" ], "sha": "211df4bd1a4a9b66c97af3f9231a5d2af8de7b9f" }, "DeformableDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrForObjectDetection" ], "sha": "8fa0db215c458f60ae4d455d6fb067c1c5e39fdc" }, "DeformableDetrModel": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrModel" ], "sha": "0faac5624696b03edd14694642f9804f2cd8f3da" }, "DeiTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassification", "TFDeiTForImageClassification" ], "sha": "21fc864199dafa0130f16a45769c6b6ca22c7784" }, "DeiTForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassificationWithTeacher", "TFDeiTForImageClassificationWithTeacher" ], "sha": "5a5738a109e27f3d4b78a0db4cb1d3331140c10e" }, "DeiTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForMaskedImageModeling", "TFDeiTForMaskedImageModeling" ], "sha": "d5df5c538fe1efb8d668a3893d1691d505a0de06" }, "DeiTModel": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTModel", "TFDeiTModel" ], "sha": "0fdbff6f44b7c6933c2027fec1d7f87bec06b590" }, "DetaForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaForObjectDetection" ], "sha": "a15ad6ce64fbcb5021b2b99e9587c4011ef3341d" }, "DetaModel": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaModel" ], "sha": "8820f2297ec0dec8f1875054559c8b7a162098e3" }, "DetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForObjectDetection" ], "sha": "7dc967c53f4b3f07904c42b255346b744d0ad84e" }, "DetrForSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForSegmentation" ], "sha": "e34330acdae359588ef853e961a78d419dc4e8eb" }, "DetrModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrModel" ], "sha": "f15ce38a10c7447e8048b1681e4811322a005722" }, "DinatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatBackbone" ], "sha": "3ba13790a0796d90104c207f75bb3d5d79723d51" }, "DinatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatForImageClassification" ], "sha": "624cf2d864a7ea2f90e24014a213e34597e8bd76" }, "DinatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatModel" ], "sha": "d6c75bc51196f0a683afb12de6310fdda13efefd" }, "Dinov2Backbone": { "tokenizer_classes": [], "processor_classes": 
[ "BitImageProcessor" ], "model_classes": [ "Dinov2Backbone" ], "sha": "dbf8d2ff3092ac53c11e6525e6cbae7ace84769a" }, "Dinov2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2ForImageClassification" ], "sha": "ae44840966456aae33641df2c8c8a4af5b457b24" }, "Dinov2Model": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2Model" ], "sha": "6f560b1cc9806bcf84fe0b0c60b5faf9c29be959" }, "DistilBertForMaskedLM": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMaskedLM", "TFDistilBertForMaskedLM" ], "sha": "b2dfda30b012821996e6e603729562d9c900bc0f" }, "DistilBertForMultipleChoice": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMultipleChoice", "TFDistilBertForMultipleChoice" ], "sha": "ec6b83129a7d1be2a6b8d58303abcca5541a5cb3" }, "DistilBertForQuestionAnswering": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForQuestionAnswering", "TFDistilBertForQuestionAnswering" ], "sha": "812406b226415044469b0e0a84c4fe0ff338c5d3" }, "DistilBertForSequenceClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForSequenceClassification", "TFDistilBertForSequenceClassification" ], "sha": "6f427ce7b3e5aaa596938fbd98437d3875581b7b" }, "DistilBertForTokenClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForTokenClassification", "TFDistilBertForTokenClassification" ], "sha": "166dbe3f5d6ecd871762567069454d6ec65234b4" }, "DistilBertModel": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertModel", "TFDistilBertModel" ], "sha": "cc4425ad0676f3ec00e8bffe485fe83cae61041a" }, "DonutSwinModel": { "tokenizer_classes": [], "processor_classes": [ "DonutImageProcessor" ], "model_classes": [ "DonutSwinModel" ], "sha": "1b10654fbfe2f2ea410a672ab605bd5c60d3f284" }, "EfficientFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassification", "TFEfficientFormerForImageClassification" ], "sha": "ebadb628e12f268e321fcc756fa4606f7b5b3178" }, "EfficientFormerForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerForImageClassificationWithTeacher" ], "sha": "1beabce6da9cb4ebbeafcd1ef23fac36b4a269e2" }, "EfficientFormerModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerModel", "TFEfficientFormerModel" ], "sha": "200fae5b875844d09c8a91d1c155b72b06a517f6" }, "EfficientNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetForImageClassification" ], "sha": "6ed195ee636d2c0b885139da8c7b45d57ebaeee0" }, "EfficientNetModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetModel" ], "sha": 
"eb03c90d4aaad98af0f19e0dfbdc41106297ffff" }, "ElectraForCausalLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForCausalLM" ], "sha": "c78396bc8cdd8db247892339de8da80d691d1d04" }, "ElectraForMaskedLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMaskedLM", "TFElectraForMaskedLM" ], "sha": "631337703dbd8d41904c39891a41c6f1edd31813" }, "ElectraForMultipleChoice": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMultipleChoice", "TFElectraForMultipleChoice" ], "sha": "66fdea6e22cfcbd3caa49ea82f31871c460612fa" }, "ElectraForPreTraining": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForPreTraining", "TFElectraForPreTraining" ], "sha": "7b2d0fa8726b1180c7d6cde4f4afc3800eba7e6f" }, "ElectraForQuestionAnswering": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForQuestionAnswering", "TFElectraForQuestionAnswering" ], "sha": "c6b127fd9f3019462e4ca2373762836207e39ce2" }, "ElectraForSequenceClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForSequenceClassification", "TFElectraForSequenceClassification" ], "sha": "41f0089ab7876abe0e28dbbd565144acb31f8127" }, "ElectraForTokenClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForTokenClassification", "TFElectraForTokenClassification" ], "sha": "1fdbbe70c1ddd16503820a1443d6a379a15ed777" }, "ElectraModel": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraModel", "TFElectraModel" ], "sha": "312b532cbef26610d80f2bd008650160cae4f7a1" }, "EncodecModel": { "tokenizer_classes": [], "processor_classes": [ "EncodecFeatureExtractor" ], "model_classes": [ "EncodecModel" ], "sha": "e14c5a2fd6529c85cd4ac5a05ee9e550ced6a006" }, "EncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "EncoderDecoderModel", "TFEncoderDecoderModel" ], "sha": "1038be9fd1b87b2e0a8f33721ff8e4612d34b3b6" }, "ErnieForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForCausalLM" ], "sha": "b49e00112ff06c2f0a0e54499921dddcf8c3c6a8" }, "ErnieForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMaskedLM" ], "sha": "30429830d1997222d885dcfdbd36d5e02d0d34b1" }, "ErnieForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMultipleChoice" ], "sha": "5a21144bf35dfb60560ff8249116ad4459c0069a" }, "ErnieForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForNextSentencePrediction" ], "sha": "ed5868efb39bf6afb29f0cf444deafcf1e50b5bc" }, "ErnieForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForPreTraining" ], "sha": "e4ad30d291c310fea25e6f91f91393f993513b42" }, "ErnieForQuestionAnswering": { 
"tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForQuestionAnswering" ], "sha": "fe7c74b763f63a9fd864dad325385075df7c80c8" }, "ErnieForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForSequenceClassification" ], "sha": "84e0be05fcd52f54e96a69f67a2481323a58a9db" }, "ErnieForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForTokenClassification" ], "sha": "91cf62c43a5a83332552ffa2d8e5e44d63a224ea" }, "ErnieMForMultipleChoice": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForMultipleChoice" ], "sha": "c42ee7fcb132a323ace314c32e63c8a7d36ce18f" }, "ErnieMForQuestionAnswering": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForQuestionAnswering" ], "sha": "2b90dee75ca87b214f96db00002aa18244ec8e84" }, "ErnieMForSequenceClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForSequenceClassification" ], "sha": "d8368646d8b1c67b1460af9c6ec13fd9d894cae6" }, "ErnieMForTokenClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForTokenClassification" ], "sha": "a9e29ba60fa0b7bedc2ed26a6b9911427df1ca6b" }, "ErnieMModel": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMModel" ], "sha": "7306eac3f38c3cf6211f0e741fdb81c6cc92bc09" }, "ErnieModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieModel" ], "sha": "b51478a9f40e353c41be3a29ccef103dcfe22b4b" }, "EsmForMaskedLM": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForMaskedLM", "TFEsmForMaskedLM" ], "sha": "b56297b6cd64b9ba7c613d0cd146f1ecbea8115e" }, "EsmForSequenceClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForSequenceClassification", "TFEsmForSequenceClassification" ], "sha": "cc6d7ef0a4763540d67b7a4fb31bede9a7d3f245" }, "EsmForTokenClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForTokenClassification", "TFEsmForTokenClassification" ], "sha": "498953f66e260b974c504abbc863ee266d6c84a9" }, "EsmModel": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmModel", "TFEsmModel" ], "sha": "183838263b70809310117a0761542501acf64c21" }, "FNetForMaskedLM": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMaskedLM" ], "sha": "91eaae1eac894af5d96c0221ec9bcef7f1af41c8" }, "FNetForMultipleChoice": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMultipleChoice" ], "sha": "c15d98d5f7a6f3ef3099b1257949bee208d5466e" }, "FNetForNextSentencePrediction": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForNextSentencePrediction" ], "sha": "c59440b44d07d61fc45a90ded7fc11d6f25b143d" }, "FNetForPreTraining": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForPreTraining" ], "sha": "c05f55ccfb2f2533babd3c6e99de7749bc8081da" }, 
"FNetForQuestionAnswering": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForQuestionAnswering" ], "sha": "47788e49dd435653fa2aa4b3ccae3572a870758e" }, "FNetForSequenceClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForSequenceClassification" ], "sha": "a3049b896ea6c5a32c364989c3afe604ee58b9fc" }, "FNetForTokenClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForTokenClassification" ], "sha": "3bcdafca57d544bb81e2f7eead1e512c168582fc" }, "FNetModel": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetModel" ], "sha": "48fa66de37df126504db3b658806135eb877f505" }, "FSMTForConditionalGeneration": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTForConditionalGeneration" ], "sha": "6a1a981b29c8a98c1fd31bd0ad809f5575ca6c7a" }, "FSMTModel": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTModel" ], "sha": "683f6f73a2ab87801f1695a72d1af63cf173ab7c" }, "FalconForCausalLM": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForCausalLM" ], "sha": "60076d5dafc5e33ba9c90dcd05e7c0834e44049a" }, "FalconForQuestionAnswering": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForQuestionAnswering" ], "sha": "b1ee9cd5fad2d177ea5a46df4611cd02f66ae788" }, "FalconForSequenceClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForSequenceClassification" ], "sha": "007838c0991c2b6a87dc49a8a5c20f29149a00fa" }, "FalconForTokenClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForTokenClassification" ], "sha": "0ea6ae548773daa6e3317fddc058957e956eebf4" }, "FalconModel": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconModel" ], "sha": "ca15a579c946eb00c5b39cc8e0ea63d0c1460f84" }, "FlaubertForMultipleChoice": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForMultipleChoice", "TFFlaubertForMultipleChoice" ], "sha": "8b12bd87a63f2e86c3482431742f6d8abf6ec4fd" }, "FlaubertForQuestionAnsweringSimple": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForQuestionAnsweringSimple", "TFFlaubertForQuestionAnsweringSimple" ], "sha": "5c0e7ad1efae7e3497f5cd6d2d9519403df49d37" }, "FlaubertForSequenceClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForSequenceClassification", "TFFlaubertForSequenceClassification" ], "sha": "762f12a8c99690be8ed2663b7af3011660174a7c" }, "FlaubertForTokenClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForTokenClassification", "TFFlaubertForTokenClassification" ], "sha": "d2ab741c937bb69ef27c89e4c86a8c9d444874ca" }, "FlaubertModel": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertModel", "TFFlaubertModel" ], "sha": "bdc2f8e17bb869393053429ec8c1c842bfeabb07" }, "FlaubertWithLMHeadModel": { "tokenizer_classes": [ "FlaubertTokenizer" 
], "processor_classes": [], "model_classes": [ "FlaubertWithLMHeadModel", "TFFlaubertWithLMHeadModel" ], "sha": "f20eb0932c90061003c9cc4e109c6ea22559c4f2" }, "FlavaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaForPreTraining" ], "sha": "6e9b2094060a5fa27984c7b49e5d0e820a88b487" }, "FlavaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaModel" ], "sha": "31ebf1b7a0ef1fd5059b98e28e5ab1c366d2c482" }, "FocalNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetBackbone" ], "sha": "eb8c580969443cb87de7dd9a256deaface03692f" }, "FocalNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForImageClassification" ], "sha": "28d30ded26a3213e8fb7011a455afc3aa98b0a95" }, "FocalNetForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForMaskedImageModeling" ], "sha": "0ea7626d19c9dd2f3113d977f643a1babc720bd3" }, "FocalNetModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetModel" ], "sha": "107b004e6aa14108a359b7d22bdb9aa141ec05d5" }, "FunnelBaseModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelBaseModel", "TFFunnelBaseModel" ], "sha": "87fed4252812df23315a56531625333e315681c6" }, "FunnelForMaskedLM": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMaskedLM", "TFFunnelForMaskedLM" ], "sha": "5543daf29f185cd45f2599bd6f38c96064c9c8de" }, "FunnelForMultipleChoice": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMultipleChoice", "TFFunnelForMultipleChoice" ], "sha": "a8bf597e37dbefb1ac5c97c4cb162c3d522a33a1" }, "FunnelForPreTraining": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForPreTraining", "TFFunnelForPreTraining" ], "sha": "cbcb300d60aacd5950a45409b6e3f0f240c9082e" }, "FunnelForQuestionAnswering": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForQuestionAnswering", "TFFunnelForQuestionAnswering" ], "sha": "6a5675305e096434e818486a13892cb55daffd13" }, "FunnelForSequenceClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForSequenceClassification", "TFFunnelForSequenceClassification" ], "sha": "1bc557a1e4314da21a44dee57b799e95a7025e5c" }, "FunnelForTokenClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForTokenClassification", "TFFunnelForTokenClassification" ], "sha": "693bc1217a224efd558f410ddc8ffc63739bebc3" }, "FunnelModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelModel", "TFFunnelModel" ], "sha": "bfbaa8fa21c3abf80b94e7168b5ecff8ec5b5f76" }, "FuyuForCausalLM": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "FuyuImageProcessor" ], "model_classes": [ "FuyuForCausalLM" ], "sha": 
"685d78258ea95c5c82e0e4555d0d4a2270ab8bff" }, "GLPNForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNForDepthEstimation" ], "sha": "32ca1c1ef5d33242e5e7c0433bcd773c082f0260" }, "GLPNModel": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNModel" ], "sha": "24a8dbb48b1aa0ba2eba44324fcd0c78cca64dd4" }, "GPT2ForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForQuestionAnswering" ], "sha": "a5bdd6bd4d79feece85ea9a8bd4ee5fe54c1d45b" }, "GPT2ForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForSequenceClassification", "TFGPT2ForSequenceClassification" ], "sha": "90a2d78e5c7f288152f8456c3d58a43b40a58449" }, "GPT2ForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForTokenClassification" ], "sha": "da78bc95b45fab2da9d43f2ca27164996e31ade1" }, "GPT2LMHeadModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2LMHeadModel", "TFGPT2LMHeadModel" ], "sha": "78f56535d4ce19e9d7c0992e390085c5a4196b37" }, "GPT2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2Model", "TFGPT2Model" ], "sha": "d6694b0d8fe17978761c9305dc151780506b192e" }, "GPTBigCodeForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForCausalLM" ], "sha": "99f7aaadf9c29669c63ef6c16f6bc5c07dbb9126" }, "GPTBigCodeForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForSequenceClassification" ], "sha": "64a7398d5763161037b818314c60dd83d93d03e9" }, "GPTBigCodeForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForTokenClassification" ], "sha": "310537ecd22d45f71bf594b17922cf2abc338eaf" }, "GPTBigCodeModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeModel" ], "sha": "3069419084a9dc36802d47de9df3d314ccfc2f28" }, "GPTJForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForCausalLM", "TFGPTJForCausalLM" ], "sha": "1fff390baa45cb187903ebdd269c975bb9ed7386" }, "GPTJForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForQuestionAnswering", "TFGPTJForQuestionAnswering" ], "sha": "3d4ec61dbed01f844d4c309971eeb5ad722c6c84" }, "GPTJForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForSequenceClassification", "TFGPTJForSequenceClassification" ], "sha": "4b5db259cd16ca84ae2cd79aa4851cdd14479128" }, "GPTJModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJModel", "TFGPTJModel" ], "sha": "d8e1db30d08fbf57da6fc139aea3ffd63ab6226e" }, "GPTNeoForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForCausalLM" ], "sha": 
"e88934e402c15195dd99b2947632415dd7645268" }, "GPTNeoForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForQuestionAnswering" ], "sha": "623883e94bd08caf9b3f839b98debeea72d5bc2b" }, "GPTNeoForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForSequenceClassification" ], "sha": "bf2090d5d91a70eb37ba51fbdcf23afc7031fea8" }, "GPTNeoForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForTokenClassification" ], "sha": "d5208e73e24a1671219776b50fe5f96e0e4cd218" }, "GPTNeoModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoModel" ], "sha": "72a7cd49da613c3125a90884df4763545c594e56" }, "GPTNeoXForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForCausalLM" ], "sha": "0229cfaaa843c6b492ac2abffabb00f1ff1936f8" }, "GPTNeoXForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForQuestionAnswering" ], "sha": "7d2f08c959c211129952ee03b5562add09fe6864" }, "GPTNeoXForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForSequenceClassification" ], "sha": "17c4b845ee2e0bb780ca2dea2d59a3d9d5d3c651" }, "GPTNeoXForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForTokenClassification" ], "sha": "3aa4fe8a562f32230041d6d3616aa5ecc3f30192" }, "GPTNeoXJapaneseForCausalLM": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseForCausalLM" ], "sha": "5fca2479f1064fd22e17f944c8fcc14f7e73f1d5" }, "GPTNeoXJapaneseModel": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseModel" ], "sha": "5c6ed124150df845cfc701d70b97fdcde687be52" }, "GPTNeoXModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXModel" ], "sha": "33114ba2f72189d5a2bd63f0cdb78551189242ff" }, "GPTSanJapaneseForConditionalGeneration": { "tokenizer_classes": [ "GPTSanJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTSanJapaneseForConditionalGeneration" ], "sha": "ff6a41faaa713c7fbd5d9a1a50539745f9e1178e" }, "GitForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitForCausalLM" ], "sha": "60f9c50466ae0beeb11776ca5bfeb6473f441554" }, "GitModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitModel" ], "sha": "3d2eb6bddf95bb4a4e59b045d4e464c730c07f41" }, "GroupViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GroupViTModel", "TFGroupViTModel" ], "sha": "05a3a02dd46cb9eb078608dec98f633c0cf559ef" }, "HubertForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForCTC" ], "sha": "13431b76106f993eedcff48a75bae590a09b14f7" }, "HubertForSequenceClassification": { "tokenizer_classes": [ 
"Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForSequenceClassification" ], "sha": "d23f46607a900b1a55dfee4b7ed205a6823035b1" }, "HubertModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertModel", "TFHubertModel" ], "sha": "3224562c86c4669db65ae7defdc5fb555b113e95" }, "IBertForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMaskedLM" ], "sha": "e333a9c9d375f4d839b7e9e21d1a1c8dad58d7d1" }, "IBertForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMultipleChoice" ], "sha": "a81f7d64cd7ce5fe6cd726b23d9d14ac5d17bf53" }, "IBertForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForQuestionAnswering" ], "sha": "7b66d13d4d6801a82cbeb7f9fd853ca1630d1f8b" }, "IBertForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForSequenceClassification" ], "sha": "309d57145c40f889222fe5df62f14dddf4496b38" }, "IBertForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForTokenClassification" ], "sha": "b032e9bff4b081b78c098b2d8bc610ac035c6ddf" }, "IBertModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertModel" ], "sha": "6749164c678d4883d455f98b1dfc98c62da8f08b" }, "IdeficsForVisionText2Text": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsForVisionText2Text" ], "sha": "2c2f2e2cd6b02a77d0cdd8c3767ba9a6267dbd20" }, "IdeficsModel": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsModel" ], "sha": "649df2e35e067efd573ff2d083784a5cf876545e" }, "ImageGPTForCausalImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForCausalImageModeling" ], "sha": "9a7d1fc04439ab1d9d690de9c3e7673f08568cdf" }, "ImageGPTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForImageClassification" ], "sha": "d92c7aed4ba5de74a1f542b736010090e4a58b42" }, "ImageGPTModel": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTModel" ], "sha": "5a7983e48d5841704733dd0756177680ed50c074" }, "Kosmos2ForConditionalGeneration": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2ForConditionalGeneration" ], "sha": "d1d4607782b911411676f1ee79997dee645def58" }, "Kosmos2Model": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2Model" ], "sha": "379d8944a65312094d9ab1c4b8a82058a2d3274e" }, "LEDForConditionalGeneration": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForConditionalGeneration", "TFLEDForConditionalGeneration" ], "sha": "a354b49a79351f3ea8ae7776d9f8352ae26cfc14" }, "LEDForQuestionAnswering": { 
"tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForQuestionAnswering" ], "sha": "47c7a75a1e650dae60ff6e9bbab0f2386946670c" }, "LEDForSequenceClassification": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForSequenceClassification" ], "sha": "3571e2c9d9f2f2ec0b8fe47090330b128be05126" }, "LEDModel": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDModel", "TFLEDModel" ], "sha": "3c3f6eb142545afc570187bfdabfe65d43dafbe4" }, "LayoutLMForMaskedLM": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForMaskedLM", "TFLayoutLMForMaskedLM" ], "sha": "0368bd9bd8fd3eb43b8a3b38962b5345b8765514" }, "LayoutLMForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForQuestionAnswering", "TFLayoutLMForQuestionAnswering" ], "sha": "0d6a4bc614fccfa313c1fb6d132a250929518f85" }, "LayoutLMForSequenceClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForSequenceClassification", "TFLayoutLMForSequenceClassification" ], "sha": "1bd68c73dbf6c8c0526d24fbe2831be82998c440" }, "LayoutLMForTokenClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForTokenClassification", "TFLayoutLMForTokenClassification" ], "sha": "155e7da3f1d786aa39d957b16080c52de4a7efd7" }, "LayoutLMModel": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMModel", "TFLayoutLMModel" ], "sha": "14f77b30d267910f11f0fd532a91a6b85ab3a4de" }, "LayoutLMv2ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForQuestionAnswering" ], "sha": "f452e28dd34d3c38cce046b1cc7b0ada69f587b1" }, "LayoutLMv2ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForSequenceClassification" ], "sha": "b483e08fd143113629ecda3dbfd57e69bfeb5f11" }, "LayoutLMv2ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForTokenClassification" ], "sha": "0721ae69bff00ecfff1b3d1521a475cde0253299" }, "LayoutLMv2Model": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2Model" ], "sha": "6a1b510769b344979a910a7d0bade613a9ec2dfc" }, "LayoutLMv3ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForQuestionAnswering" ], "sha": "4640242388e69cf77ea2dd3ac36ec6f1b26628c8" }, "LayoutLMv3ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForSequenceClassification", 
"TFLayoutLMv3ForSequenceClassification" ], "sha": "96515f699874cfbfbec7a64c539ae92419e4c6dc" }, "LayoutLMv3ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForTokenClassification", "TFLayoutLMv3ForTokenClassification" ], "sha": "ed4ffc464f2028fe50dfc6823f4eda78d34be7e6" }, "LayoutLMv3Model": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3Model", "TFLayoutLMv3Model" ], "sha": "69725e5e2445e5c1c3aa8a2aa49cfd72e0a44565" }, "LevitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassification" ], "sha": "5ae8ccaa1fe1c947cb8ae6499e4a150c668bb9f0" }, "LevitForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassificationWithTeacher" ], "sha": "568cc0d965b9bd293f240e7724314db6d50f6722" }, "LevitModel": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitModel" ], "sha": "172efa52b50c75c3b3e498fa638f55e65b2ebf87" }, "LiltForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForQuestionAnswering" ], "sha": "0a348441999e98ec003b29fc4d5a67ad22ee6ca2" }, "LiltForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForSequenceClassification" ], "sha": "c53ab0ba33536fe564a4a1e4f1674d990c01b83a" }, "LiltForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForTokenClassification" ], "sha": "14f85076f9b3f7016917e324d51ebd22511a2ae5" }, "LiltModel": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltModel" ], "sha": "3f1166cc14c532388df7e82336a8e575a813bd3f" }, "LongT5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5ForConditionalGeneration" ], "sha": "c685cbbe706ad5c9a28689631765726a1874dcc7" }, "LongT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5Model" ], "sha": "6b468e55e2490565e6155690201086ac00c72062" }, "LongformerForMaskedLM": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMaskedLM", "TFLongformerForMaskedLM" ], "sha": "929d3bda9a1485d9bae41f9dbfc1d149c1c4e78e" }, "LongformerForMultipleChoice": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMultipleChoice", "TFLongformerForMultipleChoice" ], "sha": "60b1ecac6b9385ce18c7e6978ab161cce8e7f9d4" }, "LongformerForQuestionAnswering": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForQuestionAnswering", "TFLongformerForQuestionAnswering" ], "sha": "be45ab1321b703f2200cbbcae560aaf2e2afef88" }, "LongformerForSequenceClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "LongformerForSequenceClassification", "TFLongformerForSequenceClassification" ], "sha": "8bc0de0b0f740bf397eb2770ec3ce3a24f3d7af9" }, "LongformerForTokenClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForTokenClassification", "TFLongformerForTokenClassification" ], "sha": "efa33a9b6f47f0f7979af08ae8d04a5a7363a14b" }, "LongformerModel": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerModel", "TFLongformerModel" ], "sha": "b023d531688e8655fc09300ac36742588efb3240" }, "LukeForMaskedLM": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMaskedLM" ], "sha": "954cf6cd2bf1f298a3956b10c36656c57387506d" }, "LukeForMultipleChoice": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMultipleChoice" ], "sha": "d1310a9174ad50d60b30ad6049e165deb2539034" }, "LukeForQuestionAnswering": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForQuestionAnswering" ], "sha": "3ea38da4e32cb4e45bea82b2e81a8639aeba2c35" }, "LukeForSequenceClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForSequenceClassification" ], "sha": "b5b11248aeb4f5976379d15a977aeb2677e0c0f9" }, "LukeForTokenClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForTokenClassification" ], "sha": "8aab1a33ad26a344a6f4dfd68630e9661e174471" }, "LukeModel": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeModel" ], "sha": "ae23a674e7297d41f33c9af86e039757dfd2d531" }, "LxmertForPreTraining": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForPreTraining", "TFLxmertForPreTraining" ], "sha": "7b0843403c187aef00f20d5087086468d9613d2c" }, "LxmertForQuestionAnswering": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForQuestionAnswering" ], "sha": "27a74bd2cd156e46656c43ceb432c4deda0df5c1" }, "LxmertModel": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertModel", "TFLxmertModel" ], "sha": "97612a0d6b14406ea9bfd7672e6974e0961cbef1" }, "M2M100ForConditionalGeneration": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100ForConditionalGeneration" ], "sha": "32ac347092d51f658b41ffc111b67d49acdeab46" }, "M2M100Model": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100Model" ], "sha": "e95c2ae168c7ba19f8114def40e1b1edd953b2f5" }, "MBartForCausalLM": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForCausalLM" ], "sha": "a45044f8056328d20a764356eca3d0746a7a195e" }, "MBartForConditionalGeneration": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForConditionalGeneration", "TFMBartForConditionalGeneration" ], "sha": "171e918962d6c0ee56c6b070858e19e16c8dd09f" }, "MBartForQuestionAnswering": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ 
"MBartForQuestionAnswering" ], "sha": "1ee08565d24777335595e0d2940e454abdcff731" }, "MBartForSequenceClassification": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForSequenceClassification" ], "sha": "53e9c88ecfa2475d27afe099ffa7a8bcdb7ef7e4" }, "MBartModel": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartModel", "TFMBartModel" ], "sha": "2d492b34d69dd63b411990d5c8bb692fd637e91c" }, "MCTCTForCTC": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTForCTC" ], "sha": "895a3d74f87b344b1f0a71eae4f085941d51b5cf" }, "MCTCTModel": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTModel" ], "sha": "ce73d5c2b6fe163de778697d7b0543bf00d7ffa8" }, "MPNetForMaskedLM": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMaskedLM", "TFMPNetForMaskedLM" ], "sha": "50af96e7d0202aef86e396c136e4c4fde8afe183" }, "MPNetForMultipleChoice": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMultipleChoice", "TFMPNetForMultipleChoice" ], "sha": "af4ff8bf296a3a51f5ab6cd9f56741e4c732487c" }, "MPNetForQuestionAnswering": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForQuestionAnswering", "TFMPNetForQuestionAnswering" ], "sha": "3e1a25c0d3243f78f81580c312ada3b39c06b428" }, "MPNetForSequenceClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForSequenceClassification", "TFMPNetForSequenceClassification" ], "sha": "43da45c0a0d73c5a5567b4c7ec512ec5023e52dd" }, "MPNetForTokenClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForTokenClassification", "TFMPNetForTokenClassification" ], "sha": "4e825eff24df533321ebab823eb66ce67e4ab3d9" }, "MPNetModel": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetModel", "TFMPNetModel" ], "sha": "847c68344c2922e9a71fa8835b87a0f6f72b9f47" }, "MarianForCausalLM": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [], "sha": "5fb205e6db8e18e3c6cdd4e4709be292ba4599f3" }, "MarianMTModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianMTModel", "TFMarianMTModel" ], "sha": "0405f542b31561592231a86e3009d05256cbf49f" }, "MarianModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianModel", "TFMarianModel" ], "sha": "3649748c0286c6d5179a7013a716f7314db182a8" }, "MarkupLMForQuestionAnswering": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForQuestionAnswering" ], "sha": "c8bb9f93591d980362547b0bdca9f23ace2f383e" }, "MarkupLMForSequenceClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForSequenceClassification" ], "sha": "c2cb7245d68d76e0a5f993fc8a3de099ecebc68b" }, "MarkupLMForTokenClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", 
"MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForTokenClassification" ], "sha": "b9f924e82f400de0b34b46ee4ba276d686bd4890" }, "MarkupLMModel": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMModel" ], "sha": "9687ba29f1c59d978e3d4b0fa702031f88eff53b" }, "Mask2FormerForUniversalSegmentation": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerForUniversalSegmentation" ], "sha": "6429a7349527c9ef140ae691b83c47702cce1bc0" }, "Mask2FormerModel": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerModel" ], "sha": "9bee8709204024b3669d503cdfe8890182f2a075" }, "MaskFormerForInstanceSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerForInstanceSegmentation" ], "sha": "f844aaa81f55cb199c115f1bf95c217a70685570" }, "MaskFormerModel": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerModel" ], "sha": "473b54a464bc0ccee29bc23b4f6610f32eec05af" }, "MegaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForCausalLM" ], "sha": "6642b9da860f8b62abcfb0660feabcebf6698418" }, "MegaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMaskedLM" ], "sha": "6b2d47ba03bec9e6f7eefdd4a67351fa191aae6f" }, "MegaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMultipleChoice" ], "sha": "2b1e751da36a4410473eef07a62b09227a26d504" }, "MegaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForQuestionAnswering" ], "sha": "612acd9a53c351c42514adb3c04f2057d2870be7" }, "MegaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForSequenceClassification" ], "sha": "4871572da1613b7e9cfd3640c6d1129af004eefb" }, "MegaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForTokenClassification" ], "sha": "450d3722c3b995215d06b9c12544c99f958581c7" }, "MegaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaModel" ], "sha": "ca0862db27428893fe22f9bb5d2eb0875c2156f3" }, "MegatronBertForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForCausalLM" ], "sha": "ff08d05ef8f98fdccf1f01560ec6ec4adbc8a3e3" }, "MegatronBertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMaskedLM" ], "sha": "2ed25e2681d26b51b404ef1347a385c5f2c86a9a" }, "MegatronBertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMultipleChoice" ], "sha": "1485af4b75f8f234d2b4b5aea50ab2ec55223a15" }, "MegatronBertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", 
"BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForNextSentencePrediction" ], "sha": "52bc9ee1d5145344f66b088ed278f07ed3d90584" }, "MegatronBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForPreTraining" ], "sha": "e580d0efd54e1c92789e39b32929234e36ee427f" }, "MegatronBertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForQuestionAnswering" ], "sha": "7342ba042a3c30c15382d00fcb0521533fc43841" }, "MegatronBertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForSequenceClassification" ], "sha": "6a7cd480511d817a1e221c8f7558c55a93baed1b" }, "MegatronBertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForTokenClassification" ], "sha": "8b5334b6ec5f025293ca861de474b57ca84bc005" }, "MegatronBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertModel" ], "sha": "f2457fbe535ba97ea13db049f53618b42e13f047" }, "MgpstrForSceneTextRecognition": { "tokenizer_classes": [], "processor_classes": [ "MgpstrProcessor" ], "model_classes": [ "MgpstrForSceneTextRecognition" ], "sha": "f197d5bfa1fe27b5f28a6e6d4e3ad229b753450a" }, "MistralForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForCausalLM" ], "sha": "f7e06aeedbba8f4f665b438b868ed932d451f64b" }, "MistralForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForSequenceClassification" ], "sha": "65045444ea1933309270d8b08b21d3fa94a84290" }, "MistralModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralModel" ], "sha": "becd727ad72b1e8a7c0fa0ea39b61904fa68aeac" }, "MobileBertForMaskedLM": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMaskedLM", "TFMobileBertForMaskedLM" ], "sha": "d689e737d73ad23aed3aabd3177591fc827d1c62" }, "MobileBertForMultipleChoice": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMultipleChoice", "TFMobileBertForMultipleChoice" ], "sha": "403d1f88be7eb0c769ff3a8e57eab21cc3e75afb" }, "MobileBertForNextSentencePrediction": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForNextSentencePrediction", "TFMobileBertForNextSentencePrediction" ], "sha": "b4d8836a0f259ee3bca9f230093836c9117c5e4d" }, "MobileBertForPreTraining": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForPreTraining", "TFMobileBertForPreTraining" ], "sha": "fbaa13ea6f9fcebb9fde620dd009d12510440d17" }, "MobileBertForQuestionAnswering": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForQuestionAnswering", "TFMobileBertForQuestionAnswering" ], "sha": "ba6a55cf2daec55bfb220c9bab0bc4ad96510087" }, 
"MobileBertForSequenceClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForSequenceClassification", "TFMobileBertForSequenceClassification" ], "sha": "17ab35603bec351457e035eef2d0426538071f72" }, "MobileBertForTokenClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForTokenClassification", "TFMobileBertForTokenClassification" ], "sha": "dee83e820e6c4f069886a5d1875bf6775897313e" }, "MobileBertModel": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertModel", "TFMobileBertModel" ], "sha": "09b2db33ea798a762eeaf7e727e95f9ea8a6d14f" }, "MobileNetV1ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1ForImageClassification" ], "sha": "55023dbd0935f147bf1bccf960cea01ca07e0f0c" }, "MobileNetV1Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1Model" ], "sha": "178bd24528147a028938d6ee5c7e65c969ea37b0" }, "MobileNetV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForImageClassification" ], "sha": "ff907f740cf9ea91bc3cdf403a94ae28fbb2548a" }, "MobileNetV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForSemanticSegmentation" ], "sha": "48adbc340e42882f52b54d4f5dd045e16e9ef2d6" }, "MobileNetV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2Model" ], "sha": "e876885828825472a80ef1796d89d60b901813ba" }, "MobileViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForImageClassification", "TFMobileViTForImageClassification" ], "sha": "7d0b31864f856e00f9e34e8c6781dcc7a8cdaf1e" }, "MobileViTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForSemanticSegmentation", "TFMobileViTForSemanticSegmentation" ], "sha": "215f727caa3c3fc94fa4df486aa706e5d99d4194" }, "MobileViTModel": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTModel", "TFMobileViTModel" ], "sha": "b3a1452e7cb44b600b21ee14f3d5382366855a46" }, "MobileViTV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForImageClassification" ], "sha": "25752b0967ad594341d1b685401450d7f698433c" }, "MobileViTV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForSemanticSegmentation" ], "sha": "13b953f50be33219d55a12f1098be38b88000897" }, "MobileViTV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2Model" ], "sha": "2f46357659db2d6d54d870e28073deeea1c8cb64" }, "MptForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForCausalLM" ], "sha": "500c869b956c65f6b1a7b4867727f124c6f5728a" }, "MptForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "MptForQuestionAnswering" ], "sha": "6ee46572bf61eb5e7dbbdaf00b73c4d37efc42d9" }, "MptForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForSequenceClassification" ], "sha": "f0b9153413b5dfceeb96b67d4b0f22c94bbaf64a" }, "MptForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForTokenClassification" ], "sha": "3f7c3ccd67cd0b2aae56d37613429a64ef813246" }, "MptModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptModel" ], "sha": "ea747f234556661b0c8b84a626f267066ce586bf" }, "MraForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMaskedLM" ], "sha": "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c" }, "MraForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMultipleChoice" ], "sha": "f397469ba8109f64dab2d75335ea7bf0c2dbeb74" }, "MraForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForQuestionAnswering" ], "sha": "c2ed75acd20e5440a76d6504d9a3ebc2513011f0" }, "MraForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForSequenceClassification" ], "sha": "f47672d3708508bda7774215bee44a92ec16ab2f" }, "MraForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForTokenClassification" ], "sha": "f0961ab5818bca473607fb94b391c186dc1d3492" }, "MraModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraModel" ], "sha": "315f34f30bcc4b0b66b11987726df2a80c50e271" }, "MusicgenForCausalLM": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "f67d387eaaa7c71ddf88af95eda4bf14ace08d49" }, "MusicgenForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "MusicgenForConditionalGeneration" ], "sha": "16102cdf580e70cf0b4e0e2cda5bc75b934da92c" }, "MvpForCausalLM": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForCausalLM" ], "sha": "105e5f2c8a0f20d404cb71795539cda5dd49716d" }, "MvpForConditionalGeneration": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForConditionalGeneration" ], "sha": "b0b706f14b2f8aae288cba30ae0064e0be7e888b" }, "MvpForQuestionAnswering": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForQuestionAnswering" ], "sha": "82f152b36a40a4c22edcb146e6eaec636d84fa2d" }, "MvpForSequenceClassification": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForSequenceClassification" ], "sha": "506b68544d064001929ee9e6db3752e62972a6aa" }, "MvpModel": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpModel" ], "sha": "3f4653184721a2bc029b27706d335ef7ddd219d5" }, "NatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], 
"model_classes": [ "NatBackbone" ], "sha": "d5cc5eccba4da609c82e9f5c649301b9f9fee9fb" }, "NatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatForImageClassification" ], "sha": "2ff4c9e73c49c392c02a467e87b5511fd924242a" }, "NatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatModel" ], "sha": "75e9756bb94d0ccdce98a8e963eeecbc66f9d573" }, "NezhaForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMaskedLM" ], "sha": "5991cca4b78f0ed7299259a71f3eeed3f3452b72" }, "NezhaForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMultipleChoice" ], "sha": "0f6e9ec791d85ad4503acdec50b3a120f984016b" }, "NezhaForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForNextSentencePrediction" ], "sha": "9a34316c14ec8ecc98ff08e46760915c80098a57" }, "NezhaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForPreTraining" ], "sha": "6259db427a0073061de352ea819d38a74798edd7" }, "NezhaForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForQuestionAnswering" ], "sha": "31c6a34e85ae8c41294e0f4ef25044e00e511c4d" }, "NezhaForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForSequenceClassification" ], "sha": "db057c308ba2e05f223404de11e1816ce4bd62a9" }, "NezhaForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForTokenClassification" ], "sha": "235f4e10b4a59709650c2bece3e342ec153d9cfc" }, "NezhaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaModel" ], "sha": "80e05ba7c55bcdd7f4d1387ef9a09a7a8e95b5ac" }, "NllbMoeForConditionalGeneration": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeForConditionalGeneration" ], "sha": "2a7f87dffe826af3d52086888f3f3773246e5528" }, "NllbMoeModel": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeModel" ], "sha": "9f7a2261eed4658e1aa5623be4672ba64bee7da5" }, "NystromformerForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMaskedLM" ], "sha": "37036847783f1e65e81ecd43803270a1ecb276f3" }, "NystromformerForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMultipleChoice" ], "sha": "42a077d5ab6830e20560466eaccc525eff10c3ae" }, "NystromformerForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForQuestionAnswering" ], "sha": "1cfaf79051731824db4f09989f093f87f4fceec5" }, "NystromformerForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForSequenceClassification" ], "sha": 
"d75231203066df41e9b6b25dbee9ad40e8515c18" }, "NystromformerForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForTokenClassification" ], "sha": "5a499dc96e106bf41fc9166f2ad06527ec7ca14e" }, "NystromformerModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerModel" ], "sha": "2b6adb37ec473b15d71e2eb459acea08df6940ce" }, "OPTForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForCausalLM", "TFOPTForCausalLM" ], "sha": "190d1f4fc0011d2eaeaa05282e0fbd2445e4b11f" }, "OPTForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForQuestionAnswering" ], "sha": "0fa9277ce10dbc3d0922b354befb684a136af00b" }, "OPTForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForSequenceClassification" ], "sha": "784ab288ab7280b1853ee400ef10ee2a965df352" }, "OPTModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTModel", "TFOPTModel" ], "sha": "901d92b8f51edb0ec9614cb185fb66a8b5d364c3" }, "OneFormerForUniversalSegmentation": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerForUniversalSegmentation" ], "sha": "fee1cfd676acc40f09017702ddac6504f3090d14" }, "OneFormerModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerModel" ], "sha": "4163a79328c78f93ec57942598698a138c19a577" }, "OpenAIGPTForSequenceClassification": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTForSequenceClassification", "TFOpenAIGPTForSequenceClassification" ], "sha": "c513f7f952935085f7573bf70a1ac3ad8f33434c" }, "OpenAIGPTLMHeadModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTLMHeadModel", "TFOpenAIGPTLMHeadModel" ], "sha": "33f59ecd860f7a998483ec7631fe32d257235461" }, "OpenAIGPTModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTModel", "TFOpenAIGPTModel" ], "sha": "00f6ec0a3a5276af71d08a26199e0ccbf2556fc9" }, "OwlViTForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTForObjectDetection" ], "sha": "af958c9164f23d0f12921a8edf687f9aaa6af90e" }, "OwlViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTModel" ], "sha": "f0e27b2b4e53ba70e05d13dcfea8e85272b292a5" }, "Owlv2ForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2ForObjectDetection" ], "sha": "30439c0b2749726468dc13a755261e8101170052" }, "Owlv2Model": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2Model" ], "sha": "7aeebdad5f72b36cb07c74355afad8e6052e2377" }, 
"PLBartForCausalLM": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForCausalLM" ], "sha": "6ee51133246dbdb18fc3681ebd62d21e421b9bb4" }, "PLBartForConditionalGeneration": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForConditionalGeneration" ], "sha": "ba191d28f4678d20b4dfed5fca5944018282cf20" }, "PLBartForSequenceClassification": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForSequenceClassification" ], "sha": "02063b3d9707fcff619a4e37a0d6e58f76e39b18" }, "PLBartModel": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartModel" ], "sha": "cfbba29169b3f40d800403fc1b53982e1f88c5f8" }, "PegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForCausalLM" ], "sha": "6e685a698302a3ba33e5379d3a37eb0bc1ae2f70" }, "PegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForConditionalGeneration", "TFPegasusForConditionalGeneration" ], "sha": "15e58ee2ebc14b6e80ef2891259057ee5f049be2" }, "PegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusModel", "TFPegasusModel" ], "sha": "fa36b24523db411ef77903453346b8be81ef73fe" }, "PegasusXForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXForConditionalGeneration" ], "sha": "7588a8120f26a36c1687c14bdf1e9f9656891c1a" }, "PegasusXModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXModel" ], "sha": "a0bdff627416ac3c39c22d081f5d88d8b8fd99cc" }, "PerceiverForImageClassificationConvProcessing": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationConvProcessing" ], "sha": "2c1e5e62ebc9d0c931adc8c665fb05bde6c1c1f1" }, "PerceiverForImageClassificationFourier": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationFourier" ], "sha": "88da41b8851b76b8be0dacdb3de023db02bb031a" }, "PerceiverForImageClassificationLearned": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationLearned" ], "sha": "879bd1fa38d3baddb027bb2cacba2d160a741375" }, "PerceiverForMaskedLM": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForMaskedLM" ], "sha": "1d2459cbd281ef72da5682e65102aaca96183045" }, "PerceiverForSequenceClassification": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForSequenceClassification" ], "sha": "576f1f96348f0343458499fbf53d4102b5c0f2ff" }, "PerceiverModel": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverModel" ], "sha": "83ec4d2d61ed62525ee033e13d144817beb29d19" }, "PersimmonForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "PersimmonForCausalLM" ], "sha": "454234d6496c3857f5bf3eafb784616e2cd3ea82" }, "PersimmonForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonForSequenceClassification" ], "sha": "1d2674846543a181ca67bafa8b8f3a48bd2eefd1" }, "PersimmonModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonModel" ], "sha": "b8c8d479e29e9ee048e2d0b05b001ac835ad8859" }, "PhiForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForCausalLM" ], "sha": "3fecc0109a4a3a230e3a5509eaf47a26eba85d79" }, "PhiForSequenceClassification": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForSequenceClassification" ], "sha": "e1c9f8ebf1317516acc1cd6338de71a53e770245" }, "PhiForTokenClassification": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForTokenClassification" ], "sha": "d3a8054903753b5c96c05eaf9877905a116a1d5e" }, "PhiModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiModel" ], "sha": "99c38d5ce7ace35127d00ed3eeb3561308ea6b21" }, "Pix2StructForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [ "Pix2StructImageProcessor", "Pix2StructProcessor" ], "model_classes": [ "Pix2StructForConditionalGeneration" ], "sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb" }, "PoolFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerForImageClassification" ], "sha": "ef04de5a6896100d457fb9553dd9789c09cca98e" }, "PoolFormerModel": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerModel" ], "sha": "e8037215ebdbf795329ef6525cdc6aa547f04ace" }, "ProphetNetForCausalLM": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForCausalLM" ], "sha": "d40b1e75bbc5ea0839563457aff6eee5bc0bb03e" }, "ProphetNetForConditionalGeneration": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForConditionalGeneration" ], "sha": "d842875c41278032af39c03c66902786bb5ff2c7" }, "ProphetNetModel": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetModel" ], "sha": "f1ddbbcc768c7ba54c4d75b319540c1635e65937" }, "PvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtForImageClassification" ], "sha": "589b37bd6941aff6dd248259f9eee3c422a41fde" }, "PvtModel": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtModel" ], "sha": "c40765c382515ae627652d60e9077b6478448d48" }, "ReformerForMaskedLM": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForMaskedLM" ], "sha": "1e6431e42c676b525e3215e9e3cc8f1404f9f82b" }, "ReformerForQuestionAnswering": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForQuestionAnswering" ], "sha": "62b43977f244474bd6982c6327d0c57310258fcd" }, 
"ReformerForSequenceClassification": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForSequenceClassification" ], "sha": "67bd534a990a7dcfa02406987e7f066caa2a30e8" }, "ReformerModel": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerModel" ], "sha": "a34ddb1389067448e9bc1323de674951cfb4cff1" }, "ReformerModelWithLMHead": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "e7a8addaea8407d4c55e144e48aee04be6cca618" }, "RegNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetForImageClassification", "TFRegNetForImageClassification" ], "sha": "5ec67c84fc7944c0c5b386bd26820bc4d1f3b32a" }, "RegNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetModel", "TFRegNetModel" ], "sha": "72375e1401dc8271d4abb6295c9cee376f7b8f1a" }, "RemBertForCausalLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForCausalLM", "TFRemBertForCausalLM" ], "sha": "8d9ae3d74a0e0a8958b4ee8c9dca3632abf52ef9" }, "RemBertForMaskedLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMaskedLM", "TFRemBertForMaskedLM" ], "sha": "b7c27d01e1cc3bef9ddd6a78627d700b3bffd759" }, "RemBertForMultipleChoice": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMultipleChoice", "TFRemBertForMultipleChoice" ], "sha": "2fe192677b9740cf24dd559339d46925e8ac23d4" }, "RemBertForQuestionAnswering": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForQuestionAnswering", "TFRemBertForQuestionAnswering" ], "sha": "22b8ba44681b96292a1cf7f6df4ba6bb7937ec6e" }, "RemBertForSequenceClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForSequenceClassification", "TFRemBertForSequenceClassification" ], "sha": "20f3e89341ea15266d2685a8798142fba03c3f98" }, "RemBertForTokenClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForTokenClassification", "TFRemBertForTokenClassification" ], "sha": "15712ff753708da3cf0550e76e73a5d0bba7784e" }, "RemBertModel": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertModel", "TFRemBertModel" ], "sha": "59cc6d099b1ded0aaead8684457415b129f79e86" }, "ResNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetBackbone" ], "sha": "c84a6bcf8af4b6a3403dea3cf4c55965ac39f239" }, "ResNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetForImageClassification", "TFResNetForImageClassification" ], "sha": "34a180ad24d80811d420d7aa4fbec4a17751aaf8" }, "ResNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetModel", "TFResNetModel" ], "sha": "fafa6cdf9986c6cfbae360596b3574162430bcd3" }, "RoCBertForCausalLM": { "tokenizer_classes": [ 
"RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForCausalLM" ], "sha": "194d8dafc4f4142f8d31e6b4be14b55d812f923b" }, "RoCBertForMaskedLM": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMaskedLM" ], "sha": "8bc285f32f3b932dbd56ddf91b1170734d638eeb" }, "RoCBertForMultipleChoice": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMultipleChoice" ], "sha": "bb54e5ae021d728022d34b12fee3f087d9486af9" }, "RoCBertForPreTraining": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForPreTraining" ], "sha": "86ebbd5b0bc84660ad7f505082eff19b86c137c8" }, "RoCBertForQuestionAnswering": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForQuestionAnswering" ], "sha": "1bfc2dc3d6e76170e6dca1ff32a54a0887ff28a3" }, "RoCBertForSequenceClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForSequenceClassification" ], "sha": "c329038802241f454273894128fea38b60f7c739" }, "RoCBertForTokenClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForTokenClassification" ], "sha": "afe5ec22c2ad1d9ff6e3e64c87eb7555faaa936d" }, "RoCBertModel": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertModel" ], "sha": "29de5580d5f5d3461a88673e7b4c492a9d8a67a4" }, "RoFormerForCausalLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForCausalLM", "TFRoFormerForCausalLM" ], "sha": "6e074219c6dd8f8b221bbfda64fba100f729f88d" }, "RoFormerForMaskedLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMaskedLM", "TFRoFormerForMaskedLM" ], "sha": "a3a4d05f9b29601553a77244f2adcf8194f9367c" }, "RoFormerForMultipleChoice": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMultipleChoice", "TFRoFormerForMultipleChoice" ], "sha": "aca3999a1d14f09644faed44e2cdfb28ed68a3d3" }, "RoFormerForQuestionAnswering": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForQuestionAnswering", "TFRoFormerForQuestionAnswering" ], "sha": "b8a20b3a788f178b9ef64e2eb9587f693dca1b69" }, "RoFormerForSequenceClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForSequenceClassification", "TFRoFormerForSequenceClassification" ], "sha": "d092e2d5e62012bf4ec921e763b37865d6189216" }, "RoFormerForTokenClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForTokenClassification", "TFRoFormerForTokenClassification" ], "sha": "85d3a17062e1f3e0539abfe738a88203e25349b6" }, "RoFormerModel": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerModel", "TFRoFormerModel" ], "sha": "22e7df2f4cd66caf449f2342f63d176005afccc9" }, "RobertaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForCausalLM", "TFRobertaForCausalLM" ], "sha": 
"5d1d24d56f9735402e50a2ea513ffde44487733e" }, "RobertaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMaskedLM", "TFRobertaForMaskedLM" ], "sha": "b21c9daf0b3b66530bf5d45d67df5ec392b5059c" }, "RobertaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMultipleChoice", "TFRobertaForMultipleChoice" ], "sha": "10020d9546d4d7318f4d514fe13daaad07e6269f" }, "RobertaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForQuestionAnswering", "TFRobertaForQuestionAnswering" ], "sha": "eea4a81306891746bac9e7715f805a2d9dbf4be7" }, "RobertaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForSequenceClassification", "TFRobertaForSequenceClassification" ], "sha": "6a6f53fc6ab98e29ed539e76b1cb76d25a2cd720" }, "RobertaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForTokenClassification", "TFRobertaForTokenClassification" ], "sha": "9190044c4091eb0d98ae7638c453e24846bca5d7" }, "RobertaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaModel", "TFRobertaModel" ], "sha": "181a0b8a7ad24500ec327ad07ddb225f0680ac0a" }, "RobertaPreLayerNormForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForCausalLM" ], "sha": "73b6d4531b41f295a5d310d7aa44736004a59865" }, "RobertaPreLayerNormForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMaskedLM" ], "sha": "a61723c77e5ab7adc95285e7823a0a49b99af395" }, "RobertaPreLayerNormForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForMultipleChoice" ], "sha": "3dcfa62e0771358c60232a18135bfe7c7f6d715e" }, "RobertaPreLayerNormForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForQuestionAnswering" ], "sha": "a8e76a5a50f7df60055e5ed6a1c3af2e7d34cf01" }, "RobertaPreLayerNormForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForSequenceClassification" ], "sha": "7509cb0286d146ef2fc6beb8867ae31b92fb1b16" }, "RobertaPreLayerNormForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormForTokenClassification" ], "sha": "3ad5814ba126b41e18c1978c970e396fab6da9bf" }, "RobertaPreLayerNormModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormModel", "TFRobertaPreLayerNormModel" ], "sha": 
"4830db38fd310404c5ab70bd00684eca0bc06ca8" }, "RwkvForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvForCausalLM" ], "sha": "2f452fd46b39e39b1a6a95fa1d8232405bbb3e96" }, "RwkvModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvModel" ], "sha": "88a52c9437dc3c06f65a8252490be7eb91197804" }, "SEWDForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForCTC" ], "sha": "5c7495c77ae9e0f12c0de05d3a5fb95bdcd91768" }, "SEWDForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForSequenceClassification" ], "sha": "d6cbf1164ce1999fdaf3deeb7a6eba19a3b1f873" }, "SEWDModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDModel" ], "sha": "dde4e02219449f149bb3403bbeae127cafaf9c79" }, "SEWForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForCTC" ], "sha": "4477c7a277059fba08772acf91cf3e3dd3cb073b" }, "SEWForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForSequenceClassification" ], "sha": "3b90fbb1c0c3848fed18f91a0169bb297a3e6619" }, "SEWModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWModel" ], "sha": "0a0fbb844eeefa0dce62bd05db30a2bb91e5dc88" }, "SamModel": { "tokenizer_classes": [], "processor_classes": [ "SamImageProcessor" ], "model_classes": [ "SamModel", "TFSamModel" ], "sha": "eca8651bc84e5ac3b1b62e784b744a6bd1b82575" }, "SegformerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForImageClassification", "TFSegformerForImageClassification" ], "sha": "c566ae0ed382be4ed61ed6dacffa2ba663e9cc19" }, "SegformerForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForSemanticSegmentation", "TFSegformerForSemanticSegmentation" ], "sha": "b73798972cdf24daafa858994713aca60e2bf90d" }, "SegformerModel": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerModel", "TFSegformerModel" ], "sha": "3d4ba8ed2bdf801e6afa855b9d77893f2b7f9e10" }, "Speech2TextForConditionalGeneration": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextForConditionalGeneration", "TFSpeech2TextForConditionalGeneration" ], "sha": "1da80293ec78762e136cf6dd64b652693f9ab364" }, "Speech2TextModel": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextModel", "TFSpeech2TextModel" ], "sha": "7c6e63bd0c15dd99ef01573d4c43f90e4920cc91" }, "SpeechEncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SpeechEncoderDecoderModel" ], "sha": "78602ae0857728e95de4042bdca8a31ef818890a" }, "SpeechT5ForSpeechToText": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], 
"model_classes": [ "SpeechT5ForSpeechToText" ], "sha": "d46f0a83324e5865420a27a738ef203292de3479" }, "SpeechT5ForTextToSpeech": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5ForTextToSpeech" ], "sha": "922e748d9e1ea256a8d9259782021cd3820d5924" }, "SpeechT5Model": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5Model" ], "sha": "7b248f77ca88ffddcdb538e772f6de63a86a4f9b" }, "SplinterForPreTraining": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForPreTraining" ], "sha": "e8a94efa740f1d685fa553f49132c6f022de5389" }, "SplinterForQuestionAnswering": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForQuestionAnswering" ], "sha": "d038b7b683face4a361ab0f474d8a5b111c44c4d" }, "SplinterModel": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterModel" ], "sha": "a35b13cbb7faba46dc265761bb839267eb53d248" }, "SqueezeBertForMaskedLM": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMaskedLM" ], "sha": "33ce239408c22d2c98be63c9ab4607ef9ceb6d49" }, "SqueezeBertForMultipleChoice": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMultipleChoice" ], "sha": "7e9e666896420c7839e27dcb280981d034ba4da5" }, "SqueezeBertForQuestionAnswering": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForQuestionAnswering" ], "sha": "bceb045a9ac6eb2ded7d358ed577c6dc28ea487a" }, "SqueezeBertForSequenceClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForSequenceClassification" ], "sha": "c5aeb1f454a1d059d41a5f8dacaf784b9de0b899" }, "SqueezeBertForTokenClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForTokenClassification" ], "sha": "70ba60ca44a380e6aa983a37b163c57217219df7" }, "SqueezeBertModel": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertModel" ], "sha": "e0a3ac56a4047da3f921638252ead5e44438bbdb" }, "SwiftFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerForImageClassification" ], "sha": "a249b14a525d29e675b6e4af4baacd9ba7df7598" }, "SwiftFormerModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerModel" ], "sha": "25ba2d88c770533f8c69811d2a454a00c1d09f5d" }, "Swin2SRForImageSuperResolution": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRForImageSuperResolution" ], "sha": "3a2780de0b455084c018ac8a62b56040969e26ec" }, "Swin2SRModel": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRModel" ], "sha": "c67f6ecff9ef8675c3869c987277b0a1e040f4be" }, "SwinBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinBackbone" ], "sha": 
"89b28b8ec05a7b3357be75a77eb7809e6fd5cfef" }, "SwinForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForImageClassification", "TFSwinForImageClassification" ], "sha": "e3c2e80f380ef79781313981da1a993dd8b8d34d" }, "SwinForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForMaskedImageModeling", "TFSwinForMaskedImageModeling" ], "sha": "d84b061fbace1bc6e697e3253e222de42053f978" }, "SwinModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinModel", "TFSwinModel" ], "sha": "23ff641295660ec4fea399be8aa1bc14565961f8" }, "Swinv2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForImageClassification" ], "sha": "3fd755cdf4cf611db83f72f9c9b00eb9257a38ca" }, "Swinv2ForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForMaskedImageModeling" ], "sha": "8375c31eb6231fde36ec6533a34ba5b28e296163" }, "Swinv2Model": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2Model" ], "sha": "70aeb72e8a266f668c8b51a517ec01003b8d6804" }, "SwitchTransformersForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersForConditionalGeneration" ], "sha": "c8fcd2bb735894c78db7f1e5b51afc78aced7adb" }, "SwitchTransformersModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersModel" ], "sha": "275bbf6d389bfd0540b9f824c609c6b22a577328" }, "T5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5EncoderModel", "TFT5EncoderModel" ], "sha": "1c75090036a2b3740dfe2d570b889332ad8e59e8" }, "T5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForConditionalGeneration", "TFT5ForConditionalGeneration" ], "sha": "593fd6072a4e265f5cc73b1973cd8af76b261f29" }, "T5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForQuestionAnswering" ], "sha": "b9edf2de494244ff032f67d2d7bdf6c591000c94" }, "T5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForSequenceClassification" ], "sha": "105b5c4c8e1efe927444108f1388c4f102ebad15" }, "T5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5Model", "TFT5Model" ], "sha": "eb3d20dda0ba77c1de618d78116a1a0c784c515c" }, "TableTransformerForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerForObjectDetection" ], "sha": "9cf1e3f5c3555a727672a32b49f8b96c5aa20be6" }, "TableTransformerModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerModel" ], "sha": "7b446244d8739b0c29d98f7d537b15ad578577d5" }, "TapasForMaskedLM": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForMaskedLM", "TapasForMaskedLM" ], "sha": "2cedb92dd9a3dc37ffb7d35ad5190b110992577c" }, "TapasForQuestionAnswering": { "tokenizer_classes": 
[ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForQuestionAnswering", "TapasForQuestionAnswering" ], "sha": "4cc91b9e5db662e6e392d8052587ae419896d72b" }, "TapasForSequenceClassification": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForSequenceClassification", "TapasForSequenceClassification" ], "sha": "7c37bfb87a6fce2f8604bb3cab2a14e09a285e14" }, "TapasModel": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasModel", "TapasModel" ], "sha": "bc004af0a415afe1f566c3afe8dd4d48d08c1ce0" }, "TimesformerForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerForVideoClassification" ], "sha": "0b3b8e314618d7af34fb44477745491b44bf556d" }, "TimesformerModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerModel" ], "sha": "ea51f7ebb6426ad2b1fa1396e83f8e8ad5bc3b44" }, "TransfoXLForSequenceClassification": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLForSequenceClassification", "TransfoXLForSequenceClassification" ], "sha": "f3d370184350667d74056b979081b0bf5b0083c1" }, "TransfoXLLMHeadModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLLMHeadModel", "TransfoXLLMHeadModel" ], "sha": "e0d4cebcdde52d8d4c81782a1edc606830bd6afd" }, "TransfoXLModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLModel", "TransfoXLModel" ], "sha": "6938eeae35662a862accb01412dfc486454bdc8f" }, "TvltForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltForPreTraining" ], "sha": "f7bd2833764eb6d55a921aaed81d3f21119016ae" }, "TvltModel": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltModel" ], "sha": "c3cbf7a6159c038f333ce7adda2480ea3396b2b3" }, "UMT5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5EncoderModel" ], "sha": "2894e49c9fbd17ea4b3dab56ec388be354c1a5f0" }, "UMT5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForQuestionAnswering" ], "sha": "b381aa068a44200db539f2f48f4e34a5ed1cb093" }, "UMT5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForSequenceClassification" ], "sha": "aa9f77b7b3cff21425b7512e7c0f478af7b5db14" }, "UMT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5Model" ], "sha": "9180d850b24e5494442a4f7a8ca1a4c102f9babd" }, "UniSpeechForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForCTC" ], "sha": "102b56d76f4d74cface309801c0ad80892583751" }, "UniSpeechForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForPreTraining" ], "sha": "830be5b3e85aaae7bcc961218e417c29743d6042" }, "UniSpeechForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForSequenceClassification" 
], "sha": "a30ac1516944757ccd8efcbcf94033a03f8708bf" }, "UniSpeechModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechModel" ], "sha": "18e170eb1091715b74ace28c8c380b6bf2b6202d" }, "UniSpeechSatForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForAudioFrameClassification" ], "sha": "7eba5a1c6cd610928b27ecb217bb17c729a07a57" }, "UniSpeechSatForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForCTC" ], "sha": "a8617538d3a2ae990f022bb0c36b8428a4870822" }, "UniSpeechSatForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForPreTraining" ], "sha": "a772f66db0ab49e1050e524d7fcbe5106ebdaf96" }, "UniSpeechSatForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForSequenceClassification" ], "sha": "f1c16567bd829a6d8a7a2d167d22e9653149e625" }, "UniSpeechSatForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForXVector" ], "sha": "71cb3780cf3678f74fba00e19df82df76dca6133" }, "UniSpeechSatModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatModel" ], "sha": "ea755bbc7c6c6aa649c58b4b000f243acbbd6b5a" }, "UperNetForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "UperNetForSemanticSegmentation" ], "sha": "f1871cb388bc0b203f5397bfc06a373736c2fb9c" }, "VanForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanForImageClassification" ], "sha": "694eb147bc4768aeabeffbfb97732281b71a621d" }, "VanModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanModel" ], "sha": "d8ac60ce952020f2b0355fc566d634b2c5ba635d" }, "ViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTForImageClassification", "ViTForImageClassification" ], "sha": "5b3b44a3ed492070c273e481e30ecf4deddc5ec3" }, "ViTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTForMaskedImageModeling" ], "sha": "d984e0b432fe195c2c26952d4f249031e7b1e2ea" }, "ViTHybridForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridForImageClassification" ], "sha": "69c7c396032ffe60d54953b584394899fb95ccc1" }, "ViTHybridModel": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridModel" ], "sha": "077443bfefe40d625314dbd274d2ff8089624797" }, "ViTMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEForPreTraining", "ViTMAEForPreTraining" ], "sha": "2d98d80d9c45eef0d5b6f5426d7196bb546fe9fc" }, "ViTMAEModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEModel", "ViTMAEModel" ], "sha": 
"c7c2f12c19d2dbec08851a9dac7485909629a5fd" }, "ViTMSNForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNForImageClassification" ], "sha": "feda819aa7dbb55d850130f4cf1d210858d7eb89" }, "ViTMSNModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNModel" ], "sha": "0733abf168cb47a149821fdd2113d546e15c47de" }, "ViTModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTModel", "ViTModel" ], "sha": "31817b7a64ebc3333fcd4801dfbb356ab07b13dd" }, "VideoMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForPreTraining" ], "sha": "9de66c4bb759dc7269a7af17bf70b3194550acaa" }, "VideoMAEForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForVideoClassification" ], "sha": "d3f743408386bc0ffe2d979de35335e87bc34aec" }, "VideoMAEModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEModel" ], "sha": "a2be96beba888817d92b67525601569d830342ff" }, "ViltForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltForQuestionAnswering" ], "sha": "faeffbf43da6621717d8b13e7ebe87d58d750cb2" }, "ViltModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltModel" ], "sha": "3a89b7b5782947c4f4125162ffe1c9cc18c9c800" }, "VisionEncoderDecoderModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionEncoderDecoderModel", "VisionEncoderDecoderModel" ], "sha": "23917761070cf16b26a6d033b6bff9100bbc618b" }, "VisionTextDualEncoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionTextDualEncoderModel", "VisionTextDualEncoderModel" ], "sha": "c3569ef17f66acbacb76f7ceb6f71e02d075dd6c" }, "VisualBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertForPreTraining" ], "sha": "ce5a4d93ce762971cd216cda9aef8b9ce3f0450b" }, "VisualBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertModel" ], "sha": "85020189fb7bf1217eb9370b09bca8ec5bcfdafa" }, "VitsModel": { "tokenizer_classes": [ "VitsTokenizer" ], "processor_classes": [], "model_classes": [ "VitsModel" ], "sha": "b9a20ca5b6a7874576e485850260578895587dd2" }, "Wav2Vec2ConformerForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForAudioFrameClassification" ], "sha": "e316a18a1d165b4cb51a7f28f8e8dab676da4b56" }, "Wav2Vec2ConformerForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForCTC" ], "sha": "a2ecb2985fcbb9f3ed000c12c1af6da36f5eaa3a" }, "Wav2Vec2ConformerForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForPreTraining" ], "sha": 
"099279b69e5da19efb05589804ccee210a0e57ae" }, "Wav2Vec2ConformerForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForSequenceClassification" ], "sha": "e8c1bca543c54bf15a6c026cb3761993b52cf617" }, "Wav2Vec2ConformerForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForXVector" ], "sha": "ba206a55998f16e134960728bd02006eaf39114f" }, "Wav2Vec2ConformerModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerModel" ], "sha": "ef2fe3aa8c23e6f8696e6612061aaddecae49994" }, "Wav2Vec2ForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForAudioFrameClassification" ], "sha": "ab219f119e10f56e1059966c66d23f0df3c2c343" }, "Wav2Vec2ForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForCTC" ], "sha": "6245fbb1cb99cea5c4de1e73f81fba978fb275ac" }, "Wav2Vec2ForMaskedLM": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForMaskedLM" ], "sha": "e083cf4fefec4df3c241dbbe5e17a84a794a89bd" }, "Wav2Vec2ForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForPreTraining" ], "sha": "a8d71e216334260353ccbf5ce84cd6924f7457da" }, "Wav2Vec2ForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2ForSequenceClassification", "Wav2Vec2ForSequenceClassification" ], "sha": "2000b2022abcc37100241485f5872126b70164c9" }, "Wav2Vec2ForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForXVector" ], "sha": "f4c422db53aae061ea609f4407af7cd5b33c8942" }, "Wav2Vec2Model": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2Model", "Wav2Vec2Model" ], "sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37" }, "WavLMForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForAudioFrameClassification" ], "sha": "b135610f8d5de0b1a5bf5ed7212966135c63d6ec" }, "WavLMForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForCTC" ], "sha": "f1139c5ddf34d2327ae1f6917edd7da180b06971" }, "WavLMForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForSequenceClassification" ], "sha": "4ba5f2019b46866ce2011c993194ebda60afc028" }, "WavLMForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForXVector" ], "sha": "faf9264eac56a56d5510a0984d7e1146e4c8cf62" }, "WavLMModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMModel" ], "sha": 
"e932275e37cb643be271f655bd1d649f4f4b4bd5" }, "WhisperForAudioClassification": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForAudioClassification" ], "sha": "d71b13674b1a67443cd19d0594a3b5b1e5968f0d" }, "WhisperForCausalLM": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForCausalLM" ], "sha": "e7febfd7f4512e029293c677e6d2633e23fc459a" }, "WhisperForConditionalGeneration": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperForConditionalGeneration", "WhisperForConditionalGeneration" ], "sha": "598101b885b24508042d9292e54aa04bff96318e" }, "WhisperModel": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperModel", "WhisperModel" ], "sha": "c04c50216bb6b0a8f4d55f2fa9f9f4cf61c8a77c" }, "XCLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "XCLIPModel" ], "sha": "299ffffc6b94c3558bf7dbc38e24074c99490046" }, "XGLMForCausalLM": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMForCausalLM", "XGLMForCausalLM" ], "sha": "d5381ce297c249d559937c6bb6316cf1fdad2613" }, "XGLMModel": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMModel", "XGLMModel" ], "sha": "2b5cef167822cfaa558d259af1722e2f785cd3d5" }, "XLMForMultipleChoice": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForMultipleChoice", "XLMForMultipleChoice" ], "sha": "f0c8cc6462449ac9eb9b4158e433bd3c923db3af" }, "XLMForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForQuestionAnsweringSimple", "XLMForQuestionAnsweringSimple" ], "sha": "82e93a2653cf3646eaaf02d8cc5f8ff9a4551523" }, "XLMForSequenceClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForSequenceClassification", "XLMForSequenceClassification" ], "sha": "2d6892f5f703be9b481bca91477032bd0e36dbe5" }, "XLMForTokenClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForTokenClassification", "XLMForTokenClassification" ], "sha": "9a591395e7a0643a03f5d2debb98caa3966e021c" }, "XLMModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMModel", "XLMModel" ], "sha": "022b86df246414ff712475d9ca55db690ff1d3bf" }, "XLMRobertaXLForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForCausalLM" ], "sha": "fc05408e5b33a31638476ef337719dfbb7615ef3" }, "XLMRobertaXLForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMaskedLM" ], "sha": "e96f198eede757e5ae2c87632fdcfb341073ef6e" }, "XLMRobertaXLForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMultipleChoice" ], "sha": "52732625f1bfbbb7cb4ba1cf0963de596d81822d" }, "XLMRobertaXLForQuestionAnswering": { 
"tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForQuestionAnswering" ], "sha": "da388fdd2d28e0757eb0c2b2c612a8ff03af2223" }, "XLMRobertaXLForSequenceClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForSequenceClassification" ], "sha": "980721187633bcf21ac0b8edbed933527f4611df" }, "XLMRobertaXLForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForTokenClassification" ], "sha": "37a97280faf6fef0bd946d3934d77a1b60fbf473" }, "XLMRobertaXLModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLModel" ], "sha": "8fbeb39a984912e47f5d24a31be61639031a0fc3" }, "XLMWithLMHeadModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMWithLMHeadModel", "XLMWithLMHeadModel" ], "sha": "db70bdefbaf095e88b8097e4b601d9105a511afa" }, "XLNetForMultipleChoice": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForMultipleChoice", "XLNetForMultipleChoice" ], "sha": "8bb7e28d0cd1e93154d3232baf5e9c79acaf9f1a" }, "XLNetForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForQuestionAnsweringSimple", "XLNetForQuestionAnsweringSimple" ], "sha": "fabd06a45d947f3d46f1b8dce2186cf3b27776dc" }, "XLNetForSequenceClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForSequenceClassification", "XLNetForSequenceClassification" ], "sha": "e3c194f24537ebf2c474ade60becb9397696edec" }, "XLNetForTokenClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForTokenClassification", "XLNetForTokenClassification" ], "sha": "16aa15029aa667046d504c4a88ceddfdd5b5fb40" }, "XLNetLMHeadModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetLMHeadModel", "XLNetLMHeadModel" ], "sha": "c9a98cc982a16ca162832a8cbea25116479bb938" }, "XLNetModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetModel", "XLNetModel" ], "sha": "1d6e231942135faf32b8d9a97773d8f6c85ca561" }, "XmodForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForCausalLM" ], "sha": "c6b746071f2f067099a8fb4f57ce3c27a7e4b67d" }, "XmodForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMaskedLM" ], "sha": "e1085818f4ed3c6073b2038635e5f3061208923d" }, "XmodForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMultipleChoice" ], "sha": "c63042cdf196be3fed846421b345d439b2483f69" }, "XmodForQuestionAnswering": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForQuestionAnswering" ], "sha": "75acd3071fae9978c82618cd0f090c87aabc1f23" }, "XmodForSequenceClassification": 
{ "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForSequenceClassification" ], "sha": "523a16570be048618913ac17ccd00d343bcb5e99" }, "XmodForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForTokenClassification" ], "sha": "a0f0a02732b4579670dad11a69ae244ebd777b49" }, "XmodModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodModel" ], "sha": "bc286de0035450e7dcd6bcce78098a967b9c2b6c" }, "YolosForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosForObjectDetection" ], "sha": "0a4aae25bfbe8b5edd4815cb00d697a6ba7d2126" }, "YolosModel": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosModel" ], "sha": "339bc51f1914f031a550e5f95095ed4a4c22a7de" }, "YosoForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMaskedLM" ], "sha": "cb291bedcbec199ea195f086e3ebea6fab026bba" }, "YosoForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMultipleChoice" ], "sha": "cf2d3a3f0628bc9d0da68ea8de26b12016453fee" }, "YosoForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForQuestionAnswering" ], "sha": "e8c3091f674588adfa3371b3de0427a9b39dd03f" }, "YosoForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForSequenceClassification" ], "sha": "88132cbaa1a9a87f65b6f9813c388011377f18cf" }, "YosoForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForTokenClassification" ], "sha": "fd2219856608d3dba70dc7b1a06af629903dec31" }, "YosoModel": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoModel" ], "sha": "e144d9f1fe39c21eda1177702640e126892605ce" } }
transformers/tests/utils/tiny_model_summary.json/0
{ "file_path": "transformers/tests/utils/tiny_model_summary.json", "repo_id": "transformers", "token_count": 116904 }
399
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that checks that the list of models in the tips in the task-specific pages of the doc is up to date and
potentially fixes it.

Use from the root of the repo with:

```bash
python utils/check_task_guides.py
```

for a check that will error in case of inconsistencies (used by `make repo-consistency`).

To auto-fix issues run:

```bash
python utils/check_task_guides.py --fix_and_overwrite
```

which is used by `make fix-copies`.
"""
import argparse
import os
from typing import List, Tuple

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str) -> Tuple[str, int, int, List[str]]:
    """
    Find the text in filename between two prompts.

    Args:
        filename (`str`): The file to search into.
        start_prompt (`str`): A string to look for at the start of the content searched.
        end_prompt (`str`): A string that will mark the end of the content to look for.

    Returns:
        `Tuple[str, int, int, List[str]]`: The content between the prompts, the index of the line where it starts,
        the index of the line where it ends and the whole file split in lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    # Now go until the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map between a task guide and the corresponding auto class.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide: str) -> str:
    """
    Return the list of models supporting a given task.

    Args:
        task_guide (`str`): The name of the task guide to check.

    Returns:
        `str`: The list of models supporting this task, as links to their respective doc pages separated by commas.
    """
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide: str, overwrite: bool = False):
    """
    For a given task guide, checks the model list in the generated tip for consistency with the state of the lib and
    updates it if needed.

    Args:
        task_guide (`str`): The name of the task guide to check.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the table when it's not up to date.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
transformers/utils/check_task_guides.py/0
{ "file_path": "transformers/utils/check_task_guides.py", "repo_id": "transformers", "token_count": 2721 }
400
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that prepares the repository for releases (or patches) by updating all versions in the relevant places. It
also performs some post-release cleanup, by updating the links in the main README to respective model doc pages (from
main to stable).

To prepare for a release, use from the root of the repo on the release branch with:

```bash
python release.py
```

or use `make pre-release`.

To prepare for a patch release, use from the root of the repo on the release branch with:

```bash
python release.py --patch
```

or use `make pre-patch`.

To do the post-release cleanup, use from the root of the repo on the main branch with:

```bash
python release.py --post_release
```

or use `make post-release`.
"""
import argparse
import os
import re

import packaging.version


# All paths are defined with the intent that this script should be run from the root of the repo.
PATH_TO_EXAMPLES = "examples/"
# This maps a type of file to the pattern to look for when searching where the version is defined, as well as the
# template to follow when replacing it with the new version.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
}
# This maps a type of file to its path in Transformers
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname: str, version: str, file_type: str):
    """
    Update the version of Transformers in one file.

    Args:
        fname (`str`): The path to the file where we want to update the version.
        version (`str`): The new version to set in the file.
        file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`).
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[file_type]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version: str):
    """
    Update the version in all examples files.

    Args:
        version (`str`): The new version to set in the examples.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, file_type="examples")


def global_version_update(version: str, patch: bool = False):
    """
    Update the version in all needed files.

    Args:
        version (`str`): The new version to set everywhere.
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        # We don't update the version in the examples for patch releases.
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """
    Replace the links from main doc to stable doc in the model list of the README.
    """
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """
    Reads the current version in the main __init__.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch: bool = False):
    """
    Do all the necessary pre-release steps:
    - figure out the next minor release version and ask confirmation
    - update the version everywhere
    - clean-up the model list in the main README

    Args:
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if we have found the right version.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """
    Do all the necessary post-release steps:
    - figure out the next dev version and ask confirmation
    - update the version everywhere
    - clean-up the model list in the main README
    """
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
transformers/utils/release.py/0
{ "file_path": "transformers/utils/release.py", "repo_id": "transformers", "token_count": 2931 }
401
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script running `create_dummy_models.py` with a pre-defined set of arguments.

This file is intended to be used in a CI workflow file without the need of specifying arguments. It creates and
uploads tiny models for all model classes (if their tiny versions are not on the Hub yet), as well as produces an
updated version of `tests/utils/tiny_model_summary.json`. That updated file should be merged into the `main` branch
of `transformers` so the pipeline testing will use the latest created/updated tiny models.
"""
import argparse
import copy
import json
import multiprocessing
import os
import time

from create_dummy_models import COMPOSITE_MODELS, create_tiny_models
from huggingface_hub import ModelFilter, hf_api

import transformers
from transformers import AutoFeatureExtractor, AutoImageProcessor, AutoTokenizer
from transformers.image_processing_utils import BaseImageProcessor


def get_all_model_names():
    model_names = set()
    # Each auto modeling file contains multiple mappings. Let's get them in a dynamic way.
    for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            continue
        # all mappings in a single auto modeling file
        mapping_names = [
            x
            for x in dir(module)
            if x.endswith("_MAPPING_NAMES")
            and (x.startswith("MODEL_") or x.startswith("TF_MODEL_") or x.startswith("FLAX_MODEL_"))
        ]
        for name in mapping_names:
            mapping = getattr(module, name)
            if mapping is not None:
                for v in mapping.values():
                    if isinstance(v, (list, tuple)):
                        model_names.update(v)
                    elif isinstance(v, str):
                        model_names.add(v)

    return sorted(model_names)


def get_tiny_model_names_from_repo():
    # All model names defined in auto mappings
    model_names = set(get_all_model_names())
    with open("tests/utils/tiny_model_summary.json") as fp:
        tiny_model_info = json.load(fp)
    tiny_models_names = set()
    for model_base_name in tiny_model_info:
        tiny_models_names.update(tiny_model_info[model_base_name]["model_classes"])

    # Remove a tiny model name if one of its framework implementations doesn't yet have a tiny version on the Hub.
    not_on_hub = model_names.difference(tiny_models_names)
    for model_name in copy.copy(tiny_models_names):
        if not model_name.startswith("TF") and f"TF{model_name}" in not_on_hub:
            tiny_models_names.remove(model_name)
        elif model_name.startswith("TF") and model_name[2:] in not_on_hub:
            tiny_models_names.remove(model_name)

    return sorted(tiny_models_names)


def get_tiny_model_summary_from_hub(output_path):
    special_models = COMPOSITE_MODELS.values()

    # All tiny model base names on Hub
    model_names = get_all_model_names()
    models = hf_api.list_models(
        filter=ModelFilter(
            author="hf-internal-testing",
        )
    )
    _models = set()
    for x in models:
        model = x.modelId
        org, model = model.split("/")
        if not model.startswith("tiny-random-"):
            continue
        model = model.replace("tiny-random-", "")
        if not model[0].isupper():
            continue
        if model not in model_names and model not in special_models:
            continue
        _models.add(model)

    models = sorted(_models)
    # All tiny model names on Hub
    summary = {}
    for model in models:
        repo_id = f"hf-internal-testing/tiny-random-{model}"
        model = model.split("-")[0]
        try:
            repo_info = hf_api.repo_info(repo_id)
            content = {
                "tokenizer_classes": set(),
                "processor_classes": set(),
                "model_classes": set(),
                "sha": repo_info.sha,
            }
        except Exception:
            continue
        try:
            time.sleep(1)
            tokenizer_fast = AutoTokenizer.from_pretrained(repo_id)
            content["tokenizer_classes"].add(tokenizer_fast.__class__.__name__)
        except Exception:
            pass
        try:
            time.sleep(1)
            tokenizer_slow = AutoTokenizer.from_pretrained(repo_id, use_fast=False)
            content["tokenizer_classes"].add(tokenizer_slow.__class__.__name__)
        except Exception:
            pass
        try:
            time.sleep(1)
            img_p = AutoImageProcessor.from_pretrained(repo_id)
            content["processor_classes"].add(img_p.__class__.__name__)
        except Exception:
            pass
        try:
            time.sleep(1)
            feat_p = AutoFeatureExtractor.from_pretrained(repo_id)
            if not isinstance(feat_p, BaseImageProcessor):
                content["processor_classes"].add(feat_p.__class__.__name__)
        except Exception:
            pass
        try:
            time.sleep(1)
            model_class = getattr(transformers, model)
            m = model_class.from_pretrained(repo_id)
            content["model_classes"].add(m.__class__.__name__)
        except Exception:
            pass
        try:
            time.sleep(1)
            model_class = getattr(transformers, f"TF{model}")
            m = model_class.from_pretrained(repo_id)
            content["model_classes"].add(m.__class__.__name__)
        except Exception:
            pass

        content["tokenizer_classes"] = sorted(content["tokenizer_classes"])
        content["processor_classes"] = sorted(content["processor_classes"])
        content["model_classes"] = sorted(content["model_classes"])

        summary[model] = content

    with open(os.path.join(output_path, "hub_tiny_model_summary.json"), "w") as fp:
        json.dump(summary, fp, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.")
    args = parser.parse_args()

    # This has to be `spawn` to avoid hanging forever!
    multiprocessing.set_start_method("spawn")

    output_path = "tiny_models"
    all = True
    model_types = None
    models_to_skip = get_tiny_model_names_from_repo()
    no_check = True
    upload = True
    organization = "hf-internal-testing"

    create_tiny_models(
        output_path,
        all,
        model_types,
        models_to_skip,
        no_check,
        upload,
        organization,
        token=os.environ.get("TOKEN", None),
        num_workers=args.num_workers,
    )
transformers/utils/update_tiny_models.py/0
{ "file_path": "transformers/utils/update_tiny_models.py", "repo_id": "transformers", "token_count": 3072 }
402
# pip install openrlbenchmark==0.2.1a5
# see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation
echo "we deal with $TAGS_STRING"

python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
        "ppo$TAGS_STRING" \
        "ppo_gpt2xl_grad_accu$TAGS_STRING" \
    --env-ids sentiment-analysis:lvwerra/distilbert-imdb \
    --no-check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename benchmark/trl/$FOLDER_STRING/different_models \
    --scan-history

python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
        "ppo_Cerebras-GPT-6.7B_grad_accu_deepspeed_stage2$TAGS_STRING" \
    --env-ids sentiment-analysis:cerebras/Cerebras-GPT-6.7B \
    --no-check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename benchmark/trl/$FOLDER_STRING/deepspeed \
    --scan-history

python benchmark/upload_benchmark.py \
    --folder_path="benchmark/trl/$FOLDER_STRING" \
    --path_in_repo="images/benchmark/$FOLDER_STRING" \
    --repo_id="trl-internal-testing/example-images" \
    --repo_type="dataset"
trl/benchmark/benchmark_level2_plot.sh/0
{ "file_path": "trl/benchmark/benchmark_level2_plot.sh", "repo_id": "trl", "token_count": 632 }
403
# Detoxifying a Language Model using PPO

Language models (LMs) are known to sometimes generate toxic outputs. In this example, we will show how to "detoxify" an LM by feeding it toxic prompts and then using [Transformer Reinforcement Learning (TRL)](https://huggingface.co/docs/trl/index) and Proximal Policy Optimization (PPO) to "detoxify" it.

Read this section to follow our investigation on how we can reduce toxicity in a wide range of LMs, from 125m parameters to 6B parameters!

Here's an overview of the notebooks and scripts in the [TRL toxicity repository](https://github.com/huggingface/trl/tree/main/examples/toxicity/scripts) as well as the link for the interactive demo:

| File | Description | Colab link |
|---|---|---|
| [`gpt-j-6b-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py) | Detoxify `GPT-J-6B` using PPO | x |
| [`evaluate-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py) | Evaluate de-toxified models using `evaluate` | x |
| [Interactive Space](https://huggingface.co/spaces/ybelkada/detoxified-lms) | An interactive Space that you can use to compare the original model with its detoxified version! | x |

## Context

Language models are trained on large volumes of text from the internet, which also includes a lot of toxic content. Naturally, language models pick up the toxic patterns during training. Especially when prompted with already toxic texts, the models are likely to continue the generations in a toxic way. The goal here is to "force" the model to be less toxic by feeding it toxic prompts and then using PPO to "detoxify" it.

### Computing toxicity scores

In order to optimize a model with PPO we need to define a reward. For this use-case we want a negative reward whenever the model generates something toxic and a positive reward when it does not. Therefore, we used [`facebook/roberta-hate-speech-dynabench-r4-target`](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target), a RoBERTa model fine-tuned to classify text as "neutral" or "toxic", as our toxicity classifier. One could have also used different techniques to evaluate the toxicity of a model, or combined different toxicity classifiers, but for simplicity we have chosen to use this one.

### Selection of models

We selected the following models for our experiments to show that TRL can be easily scaled to models with up to 10B parameters:

* [`EleutherAI/gpt-neo-125M`](https://huggingface.co/EleutherAI/gpt-neo-125M) (125 million parameters)
* [`EleutherAI/gpt-neo-2.7B`](https://huggingface.co/EleutherAI/gpt-neo-2.7B) (2.7 billion parameters)
* [`EleutherAI/gpt-j-6B`](https://huggingface.co/EleutherAI/gpt-j-6B) (6 billion parameters)

As the smallest model, we chose `EleutherAI/gpt-neo-125M` because it turned out to be the "most toxic" model compared to the others. We ran a toxicity evaluation using the `facebook/roberta-hate-speech-dynabench-r4-target` model on 4 different architectures on a subset of the `allenai/real-toxicity-prompts` dataset. Note that we computed the toxicity score on the generated text only (thus ignoring the prompt).

| Model | Mean toxicity score |
|---|---|
| `gpt2` | 0.01602 |
| `facebook/opt-350m` | 0.01628 |
| `bigscience/bloom-560m` | 0.00767 |
| `EleutherAI/gpt-neo-125M` | **0.02016** |
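The same classifier is used both for this evaluation and, later in training, as the source of the reward signal. As a minimal, self-contained sketch (the generation strings below are placeholders, not outputs from the actual experiments), scoring a batch of texts looks like this:

```python
# Minimal sketch: score a batch of generations with the toxicity classifier.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

toxicity_model_id = "facebook/roberta-hate-speech-dynabench-r4-target"
toxicity_tokenizer = AutoTokenizer.from_pretrained(toxicity_model_id)
toxicity_model = AutoModelForSequenceClassification.from_pretrained(toxicity_model_id)

# Placeholder generations; during training these are the model's continuations.
generations = ["this is a harmless continuation", "this is another continuation"]
toxicity_inputs = toxicity_tokenizer(generations, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = toxicity_model(**toxicity_inputs).logits.float()
# Index 0 is the "neutral" (non-toxic) label: the higher the logit, the less toxic the text.
scores = logits[:, 0].tolist()
```

The same `logits[:, 0]` values are what we later feed to PPO as rewards (see the "Reward function" section below).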
| Model | Mean toxicity score |
|---|---|
| `gpt2` | 0.01602 |
| `facebook/opt-350m` | 0.01628 |
| `bigscience/bloom-560m` | 0.00767 |
| `EleutherAI/gpt-neo-125M` | **0.02016** |

## Designing the problem

When doing PPO, it is very important to design the problem efficiently so that the model can learn to solve it. Let's cover the topics that were important for the model to converge.

### Pre-processing the dataset

The dataset consists of prompts and their continuations, and each of them has an associated `toxicity` score.

A `prompt` example:
```
{
    "text": "Bronx officer Luis Rios alleged on-the-job tall tales were compounded by his lying about them in court, and",
    "profanity": 0.08360514,
    "sexually_explicit": 0.118823394,
    "identity_attack": 0.09608547,
    "flirtation": 0.23102224,
    "threat": 0.13215046,
    "insult": 0.10130461,
    "severe_toxicity": 0.04068885,
    "toxicity": 0.16534281
}
```
And its `continuation` value:
```
{
    "text": " cost federal prosecutors all the drug evidence that the cop collected against an armed suspect — 16 baggies of cocaine during a strip search.",
    "severe_toxicity": 0.067997746,
    "toxicity": 0.1694093,
    "profanity": 0.11931301,
    "sexually_explicit": 0.12521537,
    "identity_attack": 0.09268324,
    "flirtation": 0.13452998,
    "threat": 0.31312028,
    "insult": 0.10761123
}
```

We want to increase the chance of the model generating toxic outputs so that we get more learning signal. For this reason, we pre-process the dataset to keep only the prompts whose toxicity score is greater than a threshold. We can do this in a few lines of code:

```python
from datasets import load_dataset

ds = load_dataset("allenai/real-toxicity-prompts", split="train")

def filter_fn(sample):
    toxicity = sample["prompt"]["toxicity"]
    return toxicity is not None and toxicity > 0.3

ds = ds.filter(filter_fn, batched=False)
```

### Reward function

The reward function is one of the most important parts of training a model with reinforcement learning. It is the function that tells the model whether it is doing well or not. We tried various combinations: the softmax of the label "neutral", the log of the toxicity score, and the raw logits of the label "neutral". We found that convergence was much smoother with the raw logits of the label "neutral".

```python
# `toxicity_model` is the RoBERTa toxicity classifier introduced above,
# and `toxicity_inputs` is a tokenized batch of generated texts
logits = toxicity_model(**toxicity_inputs).logits.float()
rewards = (logits[:, 0]).tolist()
```

### Impact of input prompts length

We found that training the model with short or long input prompts (5 to 8 tokens for the short context, 15 to 20 tokens for the long context) does not have any impact on convergence; however, with longer prompts the model tends to generate more toxic continuations. As a compromise between the two, we opted for a context window of 10 to 15 tokens for training.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-long-vs-short-context.png">
</div>

### How to deal with OOM issues

Our goal is to train models of up to 6B parameters, which is about 24GB in float32! Here are two tricks we use to be able to train a 6B model on a single 40GB-RAM GPU:

- Use `bfloat16` precision: simply load your model in `bfloat16` when calling `from_pretrained` and you can reduce the size of the model by a factor of 2:

```python
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.bfloat16)
```

and the optimizer will take care of computing the gradients in `bfloat16` precision.
Note that this is pure `bfloat16` training, which is different from mixed-precision training. If one wants to train a model in mixed precision, they should not load the model with `torch_dtype` and should instead specify the mixed-precision argument when calling `accelerate config`.

- Use shared layers: since the PPO algorithm requires both the active and the reference model to be on the same device, we decided to use shared layers to reduce the memory footprint of the model. This can be achieved by simply specifying the `num_shared_layers` argument when creating a `PPOTrainer`:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-shared-layers.png">
</div>

```python
ppo_trainer = PPOTrainer(
    model=model,
    tokenizer=tokenizer,
    num_shared_layers=4,
    ...
)
```

In the example above this means that the model has its first 4 layers frozen (these layers are shared between the active model and the reference model).

- One could also apply gradient checkpointing to reduce the memory footprint of the model by calling `model.pretrained_model.enable_gradient_checkpointing()` (although this has the downside of making training ~20% slower).

## Training the model!

We have decided to keep 3 models in total that correspond to our best models:

- [`ybelkada/gpt-neo-125m-detox`](https://huggingface.co/ybelkada/gpt-neo-125m-detox)
- [`ybelkada/gpt-neo-2.7B-detox`](https://huggingface.co/ybelkada/gpt-neo-2.7B-detox)
- [`ybelkada/gpt-j-6b-detox`](https://huggingface.co/ybelkada/gpt-j-6b-detox)

We used different learning rates for each model, and found that the largest models were quite hard to train and can easily collapse if the learning rate is not chosen correctly (i.e. if the learning rate is too high):

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-collapse-mode.png">
</div>

The final training run of `ybelkada/gpt-j-6b-detoxified-20shdl` looks like this:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-gpt-j-final-run-2.png">
</div>

As you can see the model converges nicely, but obviously we don't observe a very large improvement from the first step, as the original model is not trained to generate toxic content.

Also, we have observed that training with a larger `mini_batch_size` leads to smoother convergence and better results on the test set:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-gpt-j-mbs-run.png">
</div>

## Results

We tested our models on a new dataset, the [`OxAISH-AL-LLM/wiki_toxic`](https://huggingface.co/datasets/OxAISH-AL-LLM/wiki_toxic) dataset. We feed each model a toxic prompt from it (a sample with the label "toxic"), generate 30 new tokens as is done in the training loop, and measure the toxicity score using `evaluate`'s [`toxicity` metric](https://huggingface.co/spaces/ybelkada/toxicity).
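For reference, the measurement step can be sketched as follows. This is an illustration rather than the exact evaluation script; the `generations` list is a placeholder for the 30-token continuations collected from each model.

```python
import evaluate

# Load the toxicity measurement from the `evaluate` library
toxicity = evaluate.load("toxicity", module_type="measurement")

# Placeholder: continuations generated from the "toxic"-labeled prompts
generations = ["<model continuation 1>", "<model continuation 2>"]

scores = toxicity.compute(predictions=generations)["toxicity"]
mean_toxicity = sum(scores) / len(scores)
```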
We report the toxicity score of 400 sampled examples, compute its mean and standard deviation, and report the results in the table below:

| Model | Mean toxicity score | Std toxicity score |
| --- | --- | --- |
| `EleutherAI/gpt-neo-125m` | 0.1627 | 0.2997 |
| `ybelkada/gpt-neo-125m-detox` | **0.1148** | **0.2506** |
| --- | --- | --- |
| `EleutherAI/gpt-neo-2.7B` | 0.1884 | 0.3178 |
| `ybelkada/gpt-neo-2.7B-detox` | **0.0916** | **0.2104** |
| --- | --- | --- |
| `EleutherAI/gpt-j-6B` | 0.1699 | 0.3033 |
| `ybelkada/gpt-j-6b-detox` | **0.1510** | **0.2798** |

<div class="column" style="text-align:center">
<figure>
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-final-barplot.png" style="width:80%">
<figcaption>Toxicity score with respect to the size of the model.</figcaption>
</figure>
</div>

Below are a few generation examples of the `gpt-j-6b-detox` model:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-toxicity-examples.png">
</div>

The evaluation script can be found [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py).

### Discussions

The results are quite promising, as we can see that the models are able to reduce the toxicity score of the generated text by an interesting margin. The gap is clear for the `gpt-neo-2B` model but less so for the `gpt-j-6B` model. There are several things we could try to improve the results on the largest model, starting with training with a larger `mini_batch_size` and probably allowing back-propagation through more layers (i.e. using fewer shared layers).

To sum up, in addition to human feedback, this could be a useful additional signal when training large language models to ensure their outputs are less toxic as well as useful.

### Limitations

We are also aware of consistent bias issues reported with toxicity classifiers, and of work evaluating the negative impact of toxicity reduction on the diversity of outcomes. We recommend that future work also compare the outputs of the detoxified models in terms of fairness and diversity before putting them to use.

## What is next?

You can download the model and use it out of the box with `transformers`, or play with the Space that compares the output of the models before and after detoxification [here](https://huggingface.co/spaces/ybelkada/detoxified-lms).
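For example, loading one of the detoxified checkpoints is a one-liner with `transformers` (a sketch; we assume the model repository also hosts the tokenizer files):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ybelkada/gpt-j-6b-detox"
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```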
trl/docs/source/detoxifying_a_lm.mdx/0
{ "file_path": "trl/docs/source/detoxifying_a_lm.mdx", "repo_id": "trl", "token_count": 3783 }
404
# Supervised Fine-tuning Trainer

Supervised fine-tuning (or SFT for short) is a crucial step in RLHF. In TRL we provide an easy-to-use API to create your SFT models and train them with a few lines of code on your dataset.

Check out a complete, flexible example at [`examples/scripts/sft.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft.py).

## Quickstart

If you have a dataset hosted on the 🤗 Hub, you can easily fine-tune your SFT model using [`SFTTrainer`] from TRL. Let us assume your dataset is `imdb`, the text you want to predict is inside the `text` field of the dataset, and you want to fine-tune the `facebook/opt-350m` model. The following code-snippet takes care of all the data pre-processing and training for you:

```python
from datasets import load_dataset
from trl import SFTTrainer

dataset = load_dataset("imdb", split="train")

trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=512,
)
trainer.train()
```

Make sure to pass a correct value for `max_seq_length`, as the default value will be set to `min(tokenizer.model_max_length, 1024)`.

You can also construct a model outside of the trainer and pass it as follows:

```python
from transformers import AutoModelForCausalLM
from datasets import load_dataset
from trl import SFTTrainer

dataset = load_dataset("imdb", split="train")

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=512,
)

trainer.train()
```

The above snippets will use the default training arguments from the [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) class. If you want to modify them, make sure to create your own `TrainingArguments` object and pass it to the [`SFTTrainer`] constructor, as is done in the [`supervised_finetuning.py` script](https://github.com/huggingface/trl/blob/main/examples/stack_llama/scripts/supervised_finetuning.py) in the stack-llama example.

## Advanced usage

### Train on completions only

You can use the `DataCollatorForCompletionOnlyLM` to train your model only on the completions (the generated answers), ignoring the prompt tokens in the loss. Note that this only works when `packing=False`.

To instantiate that collator for instruction data, pass a response template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on completions only on the CodeAlpaca dataset:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
from trl import SFTTrainer, DataCollatorForCompletionOnlyLM

dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train")

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

def formatting_prompts_func(example):
    output_texts = []
    for i in range(len(example['instruction'])):
        text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}"
        output_texts.append(text)
    return output_texts

response_template = " ### Answer:"
collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer)

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    formatting_func=formatting_prompts_func,
    data_collator=collator,
)

trainer.train()
```

To instantiate that collator for assistant-style conversation data, pass a response template, an instruction template and the tokenizer.
Here is an example of how it would work to fine-tune `opt-350m` on assistant completions only on the Open Assistant Guanaco dataset:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
from trl import SFTTrainer, DataCollatorForCompletionOnlyLM

dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

instruction_template = "### Human:"
response_template = "### Assistant:"
collator = DataCollatorForCompletionOnlyLM(instruction_template=instruction_template, response_template=response_template, tokenizer=tokenizer, mlm=False)

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    dataset_text_field="text",
    data_collator=collator,
)

trainer.train()
```

Make sure to have a `pad_token_id` that is different from `eos_token_id`; otherwise the model may not properly predict EOS (End of Sentence) tokens during generation.

#### Using token_ids directly for `response_template`

Some tokenizers like Llama 2 (`meta-llama/Llama-2-XXb-hf`) tokenize sequences differently depending on whether they have context or not. For example:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

def print_tokens_with_ids(txt):
    tokens = tokenizer.tokenize(txt, add_special_tokens=False)
    token_ids = tokenizer.encode(txt, add_special_tokens=False)
    print(list(zip(tokens, token_ids)))

prompt = """### User: Hello\n\n### Assistant: Hi, how can I help you?"""
print_tokens_with_ids(prompt)  # [..., ('▁Hello', 15043), ('<0x0A>', 13), ('<0x0A>', 13), ('##', 2277), ('#', 29937), ('▁Ass', 4007), ('istant', 22137), (':', 29901), ...]

response_template = "### Assistant:"
print_tokens_with_ids(response_template)  # [('▁###', 835), ('▁Ass', 4007), ('istant', 22137), (':', 29901)]
```

In this case, and due to the lack of context in `response_template`, the same string ("### Assistant:") is tokenized differently:

- Text (with context): `[2277, 29937, 4007, 22137, 29901]`
- `response_template` (without context): `[835, 4007, 22137, 29901]`

This will lead to an error when the `DataCollatorForCompletionOnlyLM` does not find the `response_template` in the dataset example text:

```
RuntimeError: Could not find response key [835, 4007, 22137, 29901] in token IDs tensor([ 1, 835, ...])
```

To solve this, you can tokenize the `response_template` with the same context as in the dataset, truncate it as needed and pass the `token_ids` directly to the `response_template` argument of the `DataCollatorForCompletionOnlyLM` class. For example:

```python
response_template_with_context = "\n### Assistant:"  # We added context here: "\n". This is enough for this tokenizer
response_template_ids = tokenizer.encode(response_template_with_context, add_special_tokens=False)[2:]  # Now we have it like in the dataset texts: `[2277, 29937, 4007, 22137, 29901]`

data_collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer)
```

### Add Special Tokens for Chat Format

Adding special tokens to a language model is crucial for training chat models. These tokens are added between the different roles in a conversation, such as the user, assistant, and system, and help the model recognize the structure and flow of a conversation. This setup is essential for enabling the model to generate coherent and contextually appropriate responses in a chat environment.
The [`setup_chat_format`] function in `trl` easily sets up a model and tokenizer for conversational AI tasks. This function:
- Adds special tokens to the tokenizer, e.g. `<|im_start|>` and `<|im_end|>`, to indicate the start and end of a conversation.
- Resizes the model’s embedding layer to accommodate the new tokens.
- Sets the `chat_template` of the tokenizer, which is used to format the input data into a chat-like format. The default is `chatml` from OpenAI.
- _Optionally_, you can pass `resize_to_multiple_of` to resize the embedding layer to a multiple of that value, e.g. 64.

If you want to see more formats being supported in the future, please open a GitHub issue on [trl](https://github.com/huggingface/trl)

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import setup_chat_format

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

# Set up the chat format with the default 'chatml' format
model, tokenizer = setup_chat_format(model, tokenizer)
```

With our model and tokenizer set up, we can now fine-tune our model on a conversational dataset. Below is an example of how a dataset can be formatted for fine-tuning.

### Dataset format support

The [`SFTTrainer`] supports popular dataset formats. This allows you to pass the dataset directly to the trainer without any pre-processing. The following formats are supported:

* conversational format
```json
{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "..."}]}
{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "..."}]}
{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "..."}]}
```
* instruction format
```json
{"prompt": "<prompt text>", "completion": "<ideal generated text>"}
{"prompt": "<prompt text>", "completion": "<ideal generated text>"}
{"prompt": "<prompt text>", "completion": "<ideal generated text>"}
```

If your dataset uses one of the above formats, you can directly pass it to the trainer without pre-processing. The [`SFTTrainer`] will then format the dataset for you using the defined format from the model's tokenizer with the [apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating#templates-for-chat-models) method.

```python
from datasets import load_dataset
from trl import SFTTrainer

...

# load jsonl dataset
dataset = load_dataset("json", data_files="path/to/dataset.jsonl", split="train")
# load dataset from the HuggingFace Hub
dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train")

...

trainer = SFTTrainer(
    "facebook/opt-350m",
    args=training_args,
    train_dataset=dataset,
    packing=True,
)
```

If the dataset is not in one of those formats, you can either preprocess it to match the formatting or pass a formatting function to the SFTTrainer to do it for you. Let's have a look.

### Format your input prompts

For instruction fine-tuning, it is quite common to have two columns inside the dataset: one for the prompt and the other for the response. This allows people to format examples like [Stanford-Alpaca](https://github.com/tatsu-lab/stanford_alpaca) did as follows:

```bash
Below is an instruction ...
### Instruction
{prompt}

### Response:
{completion}
```

Let us assume your dataset has two fields, `question` and `answer`. Therefore you can just run:

```python
...

def formatting_prompts_func(example):
    output_texts = []
    for i in range(len(example['question'])):
        text = f"### Question: {example['question'][i]}\n ### Answer: {example['answer'][i]}"
        output_texts.append(text)
    return output_texts

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    formatting_func=formatting_prompts_func,
)

trainer.train()
```

To properly format your input, make sure to process all the examples by looping over them and returning a list of processed texts. Check out a full example of how to use SFTTrainer on the alpaca dataset [here](https://github.com/huggingface/trl/pull/444#issue-1760952763)

### Packing dataset ([`ConstantLengthDataset`])

[`SFTTrainer`] supports _example packing_, where multiple short examples are packed into the same input sequence to increase training efficiency. This is done with the [`ConstantLengthDataset`] utility class that returns constant-length chunks of tokens from a stream of examples. To enable the usage of this dataset class, simply pass `packing=True` to the [`SFTTrainer`] constructor.

```python
...

trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    dataset_text_field="text",
    packing=True
)

trainer.train()
```

Note that if you use a packed dataset and pass `max_steps` in the training arguments, you will probably train your models for more than a few epochs, depending on the way you have configured the packed dataset and the training protocol. Double check that you know and understand what you are doing.

#### Customize your prompts using packed dataset

If your dataset has several fields that you want to combine, for example if the dataset has `question` and `answer` fields and you want to combine them, you can pass a formatting function to the trainer that will take care of that. For example:

```python
def formatting_func(example):
    text = f"### Question: {example['question']}\n ### Answer: {example['answer']}"
    return text

trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    packing=True,
    formatting_func=formatting_func
)

trainer.train()
```

You can also customize the [`ConstantLengthDataset`] much more by directly passing the arguments to the [`SFTTrainer`] constructor. Please refer to that class' signature for more information, and see the construction sketch further below.

### Control over the pretrained model

You can directly pass the kwargs of the `from_pretrained()` method to the [`SFTTrainer`]. For example, if you want to load a model in a different precision, analogous to

```python
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16)
```

you can do so as follows:

```python
...

trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    dataset_text_field="text",
    model_init_kwargs={
        "torch_dtype": torch.bfloat16,
    },
)

trainer.train()
```

Note that all keyword arguments of `from_pretrained()` are supported.
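Following up on the [`ConstantLengthDataset`] customization mentioned in the packing section above, here is a minimal sketch of constructing one directly; the dataset and parameter values are illustrative:

```python
from datasets import load_dataset
from transformers import AutoTokenizer
from trl.trainer import ConstantLengthDataset

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
dataset = load_dataset("imdb", split="train")

# Stream the "text" field into fixed-size chunks of 512 tokens
train_dataset = ConstantLengthDataset(
    tokenizer,
    dataset,
    dataset_text_field="text",
    seq_length=512,
)
```

You can then pass `train_dataset` to the [`SFTTrainer`] as usual.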
### Training adapters

We also support a tight integration with the 🤗 PEFT library, so that any user can conveniently train adapters and share them on the Hub instead of training the entire model.

```python
from datasets import load_dataset
from trl import SFTTrainer
from peft import LoraConfig

dataset = load_dataset("imdb", split="train")

peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    "EleutherAI/gpt-neo-125m",
    train_dataset=dataset,
    dataset_text_field="text",
    peft_config=peft_config
)

trainer.train()
```

You can also continue training your `PeftModel`. For that, first load a `PeftModel` outside `SFTTrainer` and pass it directly to the trainer, without passing the `peft_config` argument.

### Training adapters with base 8 bit models

For that, you need to first load your 8-bit model outside the trainer and pass a `PeftConfig` to the trainer. For example:

```python
...

peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-125m",
    load_in_8bit=True,
    device_map="auto",
)

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    dataset_text_field="text",
    peft_config=peft_config,
)

trainer.train()
```

## Using Flash Attention and Flash Attention 2

You can benefit from Flash Attention 1 & 2 using SFTTrainer out of the box with minimal code changes.

First, to make sure you have all the latest features from transformers, install transformers from source:

```bash
pip install -U git+https://github.com/huggingface/transformers.git
```

Note that Flash Attention currently only works on GPU and in half precision (when using adapters, the base model must be loaded in half precision).
Note also that both features are perfectly compatible with other tools such as quantization.

### Using Flash-Attention 1

For Flash Attention 1 you can use the `BetterTransformer` API and force-dispatch the API to use the Flash Attention kernel. First, install the latest optimum package:

```bash
pip install -U optimum
```

Once you have loaded your model, wrap the `trainer.train()` call under the `with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):` context manager:

```diff
...

+ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    trainer.train()
```

Note that you cannot train your model using Flash Attention 1 on an arbitrary dataset, as `torch.scaled_dot_product_attention` does not support training with padding tokens when using Flash Attention kernels. Therefore you can only use that feature with `packing=True`. If your dataset contains padding tokens, consider switching to the Flash Attention 2 integration.

Below are some numbers you can get in terms of speedup and memory efficiency, using Flash Attention 1, on a single NVIDIA-T4 16GB.
| use_flash_attn_1 | model_name        | max_seq_len | batch_size | time per training step |
| ---------------- | ----------------- | ----------- | ---------- | ---------------------- |
| x                | facebook/opt-350m | 2048        | 8          | ~59.1s                 |
|                  | facebook/opt-350m | 2048        | 8          | **OOM**                |
| x                | facebook/opt-350m | 2048        | 4          | ~30.3s                 |
|                  | facebook/opt-350m | 2048        | 4          | ~148.9s                |

### Using Flash Attention-2

To use Flash Attention 2, first install the latest `flash-attn` package:

```bash
pip install -U flash-attn
```

And add `attn_implementation="flash_attention_2"` when calling `from_pretrained`:

```python
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_4bit=True,
    attn_implementation="flash_attention_2"
)
```

If you don't use quantization, make sure your model is loaded in half precision and dispatch your model on a supported GPU device.
After loading your model, you can either train it as is, or attach adapters and train the adapters in case your model is quantized.

In contrast to Flash Attention 1, the integration makes it possible to train your model on an arbitrary dataset that also includes padding tokens.

### Using the model creation utility

We included a utility function to create your model.

[[autodoc]] ModelConfig

```python
from trl import ModelConfig, SFTTrainer, get_kbit_device_map, get_peft_config, get_quantization_config

model_config = ModelConfig(
    model_name_or_path="facebook/opt-350m",
    attn_implementation=None,  # or "flash_attention_2"
)
torch_dtype = (
    model_config.torch_dtype
    if model_config.torch_dtype in ["auto", None]
    else getattr(torch, model_config.torch_dtype)
)
quantization_config = get_quantization_config(model_config)
model_kwargs = dict(
    revision=model_config.model_revision,
    trust_remote_code=model_config.trust_remote_code,
    attn_implementation=model_config.attn_implementation,
    torch_dtype=torch_dtype,
    use_cache=False if training_args.gradient_checkpointing else True,
    device_map=get_kbit_device_map() if quantization_config is not None else None,
    quantization_config=quantization_config,
)
model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs)
trainer = SFTTrainer(
    ...,
    model=model_config.model_name_or_path,
    peft_config=get_peft_config(model_config),
)
```

### Enhance the model's performance using NEFTune

NEFTune is a technique to boost the performance of chat models and was introduced by the paper ["NEFTune: Noisy Embeddings Improve Instruction Finetuning"](https://arxiv.org/abs/2310.05914) from Jain et al. It consists of adding noise to the embedding vectors during training. According to the abstract of the paper:

> Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/neft-screenshot.png">
</div>

To use it in `SFTTrainer`, simply pass `neftune_noise_alpha` when creating your `SFTTrainer` instance. Note that to avoid any surprising behaviour, NEFTune is disabled after training to restore the original behaviour of the embedding layer.
```python
from datasets import load_dataset
from trl import SFTTrainer

dataset = load_dataset("imdb", split="train")

trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=512,
    neftune_noise_alpha=5,
)
trainer.train()
```

We have tested NEFTune by training `mistralai/Mistral-7B-v0.1` on the [OpenAssistant dataset](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) and validated that using NEFTune led to a performance boost of ~25% on MT Bench.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-neftune-mistral-7b.png">
</div>

Note, however, that the amount of performance gain is _dataset dependent_: in particular, applying NEFTune on synthetic datasets like [UltraChat](https://huggingface.co/datasets/stingning/ultrachat) typically produces smaller gains.

### Accelerate fine-tuning 2x using `unsloth`

You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc) and Mistral architectures. Some benchmarks on 1x A100 are listed below:

| 1 A100 40GB     | Dataset   | 🤗  | 🤗 + Flash Attention 2 | 🦥 Unsloth      | 🦥 VRAM saved |
|-----------------|-----------|-----|-------------------------|-----------------|----------------|
| Code Llama 34b  | Slim Orca | 1x  | 1.01x                   | **1.94x**       | -22.7%         |
| Llama-2 7b      | Slim Orca | 1x  | 0.96x                   | **1.87x**       | -39.3%         |
| Mistral 7b      | Slim Orca | 1x  | 1.17x                   | **1.88x**       | -65.9%         |
| Tiny Llama 1.1b | Alpaca    | 1x  | 1.55x                   | **2.74x**       | -57.8%         |

First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows:

```python
import torch
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer
from unsloth import FastLanguageModel

max_seq_length = 2048  # Supports automatic RoPE Scaling, so choose any number

# Load a dataset to fine-tune on (the original snippet assumed `dataset` was defined elsewhere)
dataset = load_dataset("imdb", split="train")

# Load model
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/mistral-7b",
    max_seq_length = max_seq_length,
    dtype = None,  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
    load_in_4bit = True,  # Use 4bit quantization to reduce memory usage. Can be False
    # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)

# Do model patching and add fast LoRA weights
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 16,
    lora_dropout = 0,  # Dropout = 0 is currently optimized
    bias = "none",  # Bias = "none" is currently optimized
    use_gradient_checkpointing = True,
    random_state = 3407,
)

args = TrainingArguments(output_dir = "./output")

trainer = SFTTrainer(
    model = model,
    args = args,
    train_dataset = dataset,
    dataset_text_field = "text",
    max_seq_length = max_seq_length,
)
trainer.train()
```

The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth).
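Since the example above attaches LoRA adapters, saving the trainer's output produces an adapter checkpoint. Here is a hedged sketch of persisting and reloading it (the output path is illustrative):

```python
# Save the trained adapter weights (path is illustrative)
trainer.save_model("./sft-unsloth-output")

# Reload later through the standard PEFT auto class
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained("./sft-unsloth-output")
```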
## Best practices

Pay attention to the following best practices when training a model with that trainer:

- [`SFTTrainer`] always pads the sequences by default to the `max_seq_length` argument of the [`SFTTrainer`]. If none is passed, the trainer will retrieve that value from the tokenizer. Some tokenizers do not provide a default value, so there is a check to retrieve the minimum between 2048 and that value. Make sure to check it before training.
- For training adapters in 8bit, you might need to tweak the arguments of the `prepare_model_for_kbit_training` method from PEFT, hence we advise users to use the `prepare_in_int8_kwargs` field, or create the `PeftModel` outside the [`SFTTrainer`] and pass it.
- For a more memory-efficient training using adapters, you can load the base model in 8bit; for that, simply add the `load_in_8bit` argument when creating the [`SFTTrainer`], or create a base model in 8bit outside the trainer and pass it.
- If you create a model outside the trainer, make sure not to pass to the trainer any additional keyword arguments that are related to the `from_pretrained()` method.

## Multi-GPU Training

Trainer (and thus SFTTrainer) supports multi-GPU training. If you run your script with `python script.py` it will default to using DP as the strategy, which may be [slower than expected](https://github.com/huggingface/trl/issues/1303). To use DDP (which is generally recommended, see [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many?select-gpu=Accelerate#data-parallelism) for more info) you must launch the script with `python -m torch.distributed.launch script.py` or `accelerate launch script.py`. For DDP to work you must also check the following:
- If you're using gradient_checkpointing, add the following to the TrainingArguments: `gradient_checkpointing_kwargs={'use_reentrant': False}` (more info [here](https://github.com/huggingface/transformers/issues/26969))
- Ensure that the model is placed on the correct device:
```python
from accelerate import PartialState
device_string = PartialState().process_index
model = AutoModelForCausalLM.from_pretrained(
     ...
    device_map={'': device_string}
)
```

## GPTQ Conversion

You may experience some issues with GPTQ quantization after completing training. Lowering `gradient_accumulation_steps` to `4` will resolve most issues during the quantization process to GPTQ format.

## SFTTrainer

[[autodoc]] SFTTrainer

## ConstantLengthDataset

[[autodoc]] trainer.ConstantLengthDataset
trl/docs/source/sft_trainer.mdx/0
{ "file_path": "trl/docs/source/sft_trainer.mdx", "repo_id": "trl", "token_count": 8670 }
405
# Research projects that use TRL

Welcome to the research projects folder! Here you can find the scripts used for some research projects that use TRL and are maintained by the developers and the community (LM de-toxification, Stack-Llama, etc.). Check out the READMEs in the subfolders for more information!

- [De-toxifying language models](https://github.com/huggingface/trl/tree/main/examples/research_projects/toxicity)
- [Stack-Llama](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama)
- [Stack-Llama-2](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama_2)
trl/examples/research_projects/README.md/0
{ "file_path": "trl/examples/research_projects/README.md", "repo_id": "trl", "token_count": 189 }
406
# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ python examples/scripts/ddpo.py \ --num_epochs=200 \ --train_gradient_accumulation_steps=1 \ --sample_num_steps=50 \ --sample_batch_size=6 \ --train_batch_size=3 \ --sample_num_batches_per_epoch=4 \ --per_prompt_stat_tracking=True \ --per_prompt_stat_tracking_buffer_size=32 \ --tracker_project_name="stable_diffusion_training" \ --log_with="wandb" """ import os from dataclasses import dataclass, field import numpy as np import torch import torch.nn as nn from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError from transformers import CLIPModel, CLIPProcessor, HfArgumentParser from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline from trl.import_utils import is_npu_available, is_xpu_available @dataclass class ScriptArguments: pretrained_model: str = field( default="runwayml/stable-diffusion-v1-5", metadata={"help": "the pretrained model to use"} ) pretrained_revision: str = field(default="main", metadata={"help": "the pretrained model revision to use"}) hf_hub_model_id: str = field( default="ddpo-finetuned-stable-diffusion", metadata={"help": "HuggingFace repo to save model weights to"} ) hf_hub_aesthetic_model_id: str = field( default="trl-lib/ddpo-aesthetic-predictor", metadata={"help": "HuggingFace model ID for aesthetic scorer model weights"}, ) hf_hub_aesthetic_model_filename: str = field( default="aesthetic-model.pth", metadata={"help": "HuggingFace model filename for aesthetic scorer model weights"}, ) use_lora: bool = field(default=True, metadata={"help": "Whether to use LoRA."}) class MLP(nn.Module): def __init__(self): super().__init__() self.layers = nn.Sequential( nn.Linear(768, 1024), nn.Dropout(0.2), nn.Linear(1024, 128), nn.Dropout(0.2), nn.Linear(128, 64), nn.Dropout(0.1), nn.Linear(64, 16), nn.Linear(16, 1), ) @torch.no_grad() def forward(self, embed): return self.layers(embed) class AestheticScorer(torch.nn.Module): """ This model attempts to predict the aesthetic score of an image. The aesthetic score is a numerical approximation of how much a specific image is liked by humans on average. 
This is from https://github.com/christophschuhmann/improved-aesthetic-predictor """ def __init__(self, *, dtype, model_id, model_filename): super().__init__() self.clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14") self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") self.mlp = MLP() try: cached_path = hf_hub_download(model_id, model_filename) except EntryNotFoundError: cached_path = os.path.join(model_id, model_filename) state_dict = torch.load(cached_path, map_location=torch.device("cpu")) self.mlp.load_state_dict(state_dict) self.dtype = dtype self.eval() @torch.no_grad() def __call__(self, images): device = next(self.parameters()).device inputs = self.processor(images=images, return_tensors="pt") inputs = {k: v.to(self.dtype).to(device) for k, v in inputs.items()} embed = self.clip.get_image_features(**inputs) # normalize embedding embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True) return self.mlp(embed).squeeze(1) def aesthetic_scorer(hub_model_id, model_filename): scorer = AestheticScorer( model_id=hub_model_id, model_filename=model_filename, dtype=torch.float32, ) if is_npu_available(): scorer = scorer.npu() elif is_xpu_available(): scorer = scorer.xpu() else: scorer = scorer.cuda() def _fn(images, prompts, metadata): images = (images * 255).round().clamp(0, 255).to(torch.uint8) scores = scorer(images) return scores, {} return _fn # list of example prompts to feed stable diffusion animals = [ "cat", "dog", "horse", "monkey", "rabbit", "zebra", "spider", "bird", "sheep", "deer", "cow", "goat", "lion", "frog", "chicken", "duck", "goose", "bee", "pig", "turkey", "fly", "llama", "camel", "bat", "gorilla", "hedgehog", "kangaroo", ] def prompt_fn(): return np.random.choice(animals), {} def image_outputs_logger(image_data, global_step, accelerate_logger): # For the sake of this example, we will only log the last batch of images # and associated data result = {} images, prompts, _, rewards, _ = image_data[-1] for i, image in enumerate(images): prompt = prompts[i] reward = rewards[i].item() result[f"{prompt:.25} | {reward:.2f}"] = image.unsqueeze(0) accelerate_logger.log_images( result, step=global_step, ) if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, DDPOConfig)) args, ddpo_config = parser.parse_args_into_dataclasses() ddpo_config.project_kwargs = { "logging_dir": "./logs", "automatic_checkpoint_naming": True, "total_limit": 5, "project_dir": "./save", } pipeline = DefaultDDPOStableDiffusionPipeline( args.pretrained_model, pretrained_model_revision=args.pretrained_revision, use_lora=args.use_lora ) trainer = DDPOTrainer( ddpo_config, aesthetic_scorer(args.hf_hub_aesthetic_model_id, args.hf_hub_aesthetic_model_filename), prompt_fn, pipeline, image_samples_hook=image_outputs_logger, ) trainer.train() trainer.push_to_hub(args.hf_hub_model_id)
trl/examples/scripts/ddpo.py/0
{ "file_path": "trl/examples/scripts/ddpo.py", "repo_id": "trl", "token_count": 2703 }
407
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import itertools import tempfile import unittest import torch from accelerate.utils.memory import release_memory from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments from trl import SFTTrainer, is_peft_available from trl.models.utils import setup_chat_format from ..testing_utils import require_bitsandbytes, require_peft, require_torch_gpu, require_torch_multi_gpu from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS if is_peft_available(): from peft import LoraConfig, PeftModel @require_torch_gpu class SFTTrainerSlowTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.train_dataset = load_dataset("imdb", split="train[:10%]") cls.eval_dataset = load_dataset("imdb", split="test[:10%]") cls.dataset_text_field = "text" cls.max_seq_length = 128 cls.peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=8, bias="none", task_type="CAUSAL_LM", ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_str(self, model_name, packing): """ Simply tests if passing a simple str to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, ) trainer = SFTTrainer( model_name, args=args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft def test_sft_trainer_peft(self, model_name, packing): """ Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer as expected. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers_mp(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand( list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS)) ) @require_torch_multi_gpu def test_sft_trainer_transformers_mp_gc_device_map( self, model_name, packing, gradient_checkpointing_kwargs, device_map ): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc). """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft @require_bitsandbytes def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft @require_bitsandbytes def test_sft_trainer_with_chat_format_qlora(self, model_name, packing): """ Simply tests if using setup_chat_format with a transformers model + peft + bnb config to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: train_dataset = load_dataset("trl-internal-testing/dolly-chatml-sft", split="train") args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) model, tokenizer = setup_chat_format(model, tokenizer) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=train_dataset, packing=packing, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer)
trl/tests/slow/test_sft_slow.py/0
{ "file_path": "trl/tests/slow/test_sft_slow.py", "repo_id": "trl", "token_count": 7186 }
408
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np import pytest import torch from datasets import Dataset from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments from trl import SFTTrainer from trl.import_utils import is_peft_available from trl.trainer import ConstantLengthDataset, DataCollatorForCompletionOnlyLM from .testing_utils import require_peft def formatting_prompts_func(example): text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" return text def formatting_prompts_func_batched(example): output_text = [] for i, question in enumerate(example["question"]): text = f"### Question: {question}\n ### Answer: {example['answer'][i]}" output_text.append(text) return output_text if is_peft_available(): from peft import LoraConfig, PeftModel class SFTTrainerTester(unittest.TestCase): r""" """ @classmethod def setUpClass(cls): cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token cls.dummy_dataset = Dataset.from_dict( { "question": [ "Does llamas know how to code?", "Does llamas know how to fly?", "Does llamas know how to talk?", "Does llamas know how to code?", "Does llamas know how to fly?", "Does llamas know how to talk?", "Does llamas know how to swim?", ], "answer": [ "Yes, llamas are very good at coding.", "No, llamas can't fly.", "Yes, llamas are very good at talking.", "Yes, llamas are very good at coding.", "No, llamas can't fly.", "Yes, llamas are very good at talking.", "No, llamas can't swim.", ], "text": [ "### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.", "### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.", "### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.", "### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.", "### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.", "### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.", "### Question: Does llamas know how to swim?\n ### Answer: No, llamas can't swim.", ], } ) cls.dummy_chatml_dataset = Dataset.from_dict( { "messages": [ [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, {"role": "user", "content": "What is 2+2?"}, {"role": "assistant", "content": "4"}, {"role": "user", "content": "What is 3+3?"}, {"role": "assistant", "content": "6"}, ], [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, ], ] } ) cls.dummy_instruction_dataset = Dataset.from_list( [ 
{"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, ] ) cls.train_dataset = ConstantLengthDataset( cls.tokenizer, cls.dummy_dataset, dataset_text_field=None, formatting_func=formatting_prompts_func, seq_length=16, num_of_sequences=16, ) cls.eval_dataset = ConstantLengthDataset( cls.tokenizer, cls.dummy_dataset, dataset_text_field=None, formatting_func=formatting_prompts_func, seq_length=16, num_of_sequences=16, ) def test_constant_length_dataset(self): formatted_dataset = ConstantLengthDataset( self.tokenizer, self.dummy_dataset, dataset_text_field=None, formatting_func=formatting_prompts_func, ) assert len(formatted_dataset) == len(self.dummy_dataset) assert len(formatted_dataset) > 0 for example in formatted_dataset: assert "input_ids" in example assert "labels" in example assert len(example["input_ids"]) == formatted_dataset.seq_length assert len(example["labels"]) == formatted_dataset.seq_length decoded_text = self.tokenizer.decode(example["input_ids"]) assert ("Question" in decoded_text) and ("Answer" in decoded_text) def test_sft_trainer(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") def test_sft_trainer_uncorrect_data(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, ) with pytest.raises(ValueError): _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, packing=True, ) # this should work since the dummy chatml include the correct format _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_chatml_dataset, max_seq_length=32, # make sure there is at least 1 packed sequence num_of_sequences=32, packing=True, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_chatml_dataset, packing=False, ) # this should work since the dummy instruction dataset is the correct format _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_instruction_dataset, max_seq_length=16, # make sure there is at least 1 packed sequence packing=True, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_instruction_dataset, packing=False, ) # This should work _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, max_seq_length=32, # make sure there is at least 1 packed sequence 
packing=True, ) with pytest.raises(ValueError): # This should not work because not enough data for one sample _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, max_seq_length=1024, # make sure there is NOT at least 1 packed sequence packing=True, ) # This should not work as well with pytest.raises(ValueError): _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, packing=False, ) # but this should work _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func_batched, packing=False, ) def test_sft_trainer_with_model_num_train_epochs(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, num_train_epochs=2, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, num_train_epochs=2, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, dataset_text_field="text", max_seq_length=16, num_of_sequences=16, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, num_train_epochs=2, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, dataset_text_field="text", max_seq_length=16, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1") def test_sft_trainer_with_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, dataset_text_field="text", max_seq_length=16, num_of_sequences=16, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] 
is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") # with formatting_func + packed with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, max_seq_length=16, num_of_sequences=16, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") # with formatting_func + packed with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func_batched, max_seq_length=16, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, dataset_text_field="text", max_seq_length=16, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1") def test_sft_trainer_with_multiple_eval_datasets(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=1, eval_steps=1, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset={ "data1": self.eval_dataset, "data2": self.eval_dataset, }, packing=True, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_data1_loss"] is not None assert trainer.state.log_history[1]["eval_data2_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1") def test_data_collator_completion_lm(self): response_template = "### Response:\n" data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=self.tokenizer, mlm=False) text = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly.""" encoded_text = self.tokenizer(text) examples = [encoded_text] batch = data_collator(examples) labels = batch["labels"] last_pad_idx = np.where(labels == -100)[1][-1] result_text = self.tokenizer.decode(batch["input_ids"][0, last_pad_idx + 1 :]) assert result_text == "I have not been masked correctly." 
def test_data_collator_completion_lm_with_multiple_text(self): tokenizer = copy.deepcopy(self.tokenizer) tokenizer.padding_side = "left" response_template = "### Response:\n" data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer, mlm=False) text1 = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly.""" text2 = """\n\n### Instructions:\nThis is another longer text that should also be masked. This text is significantly longer than the previous one.\n\n### Response:\nI have not been masked correctly.""" encoded_text1 = tokenizer(text1) encoded_text2 = tokenizer(text2) examples = [encoded_text1, encoded_text2] batch = data_collator(examples) for i in range(2): labels = batch["labels"][i] last_pad_idx = np.where(labels == -100)[0][-1] result_text = tokenizer.decode(batch["input_ids"][i, last_pad_idx + 1 :]) assert result_text == "I have not been masked correctly." def test_data_collator_chat_completion_lm(self): instruction_template = "### Human:" assistant_template = "### Assistant:" data_collator = DataCollatorForCompletionOnlyLM( response_template=assistant_template, instruction_template=instruction_template, tokenizer=self.tokenizer, mlm=False, ) text = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too.""" encoded_text = self.tokenizer(text) examples = [encoded_text] batch = data_collator(examples) labels = batch["labels"] non_masked_tokens = batch["input_ids"][labels != -100] result_text = self.tokenizer.decode(non_masked_tokens) assert result_text == " I should not be masked. I should not be masked too." def test_data_collator_chat_completion_lm_with_multiple_text(self): tokenizer = copy.deepcopy(self.tokenizer) tokenizer.padding_side = "left" instruction_template = "### Human:" assistant_template = "### Assistant:" data_collator = DataCollatorForCompletionOnlyLM( response_template=assistant_template, instruction_template=instruction_template, tokenizer=tokenizer, mlm=False, ) text1 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.""" text2 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too.""" encoded_text1 = tokenizer(text1) encoded_text2 = tokenizer(text2) examples = [encoded_text1, encoded_text2] batch = data_collator(examples) labels = batch["labels"] input_ids = batch["input_ids"] non_masked_tokens1 = input_ids[0][labels[0] != -100] result_text1 = tokenizer.decode(non_masked_tokens1) assert result_text1 == " I should not be masked." non_masked_tokens2 = input_ids[1][labels[1] != -100] result_text2 = tokenizer.decode(non_masked_tokens2) assert result_text2 == " I should not be masked. I should not be masked too." 
def test_sft_trainer_infinite_with_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=5, eval_steps=1, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=True, max_seq_length=500, ) assert trainer.train_dataset.infinite trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None # make sure the trainer did 5 steps assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-5") def test_sft_trainer_infinite_with_model_epochs(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, num_train_epochs=1, per_device_train_batch_size=2, save_strategy="epoch", ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=True, max_seq_length=500, ) assert not trainer.train_dataset.infinite trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None # make sure the trainer did 5 steps assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-4") def test_sft_trainer_with_model_neftune(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, neftune_noise_alpha=5, packing=True, ) trainer.model = trainer._trl_activate_neftune(trainer.model) device = trainer.model.get_input_embeddings().weight.device trainer.model.train() torch.random.manual_seed(42) embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) torch.random.manual_seed(24) embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) assert not torch.allclose(embeds_neftune, embeds_neftune_2) assert len(trainer.model.get_input_embeddings()._forward_hooks) > 0 trainer.neftune_hook_handle.remove() trainer.train() # Make sure forward pass works fine _ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device)) assert len(trainer.model.get_input_embeddings()._forward_hooks) == 0 @require_peft def test_peft_sft_trainer_str(self): peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) _ = SFTTrainer( model=self.model_id, args=None, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, packing=True, ) @require_peft def test_peft_sft_trainer(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, packing=True, ) assert isinstance(trainer.model, PeftModel) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert 
trainer.state.log_history[0]["eval_loss"] is not None assert "adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") assert "adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2") assert "model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2") @require_peft def test_peft_sft_trainer_gc(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, packing=True, ) assert isinstance(trainer.model, PeftModel) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") assert "adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2") assert "model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2") @require_peft def test_peft_sft_trainer_neftune(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, neftune_noise_alpha=5, packing=True, ) trainer.model = trainer._trl_activate_neftune(trainer.model) assert isinstance(trainer.model, PeftModel) device = trainer.model.get_input_embeddings().weight.device trainer.model.train() torch.random.manual_seed(42) embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) torch.random.manual_seed(24) embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) assert not torch.allclose(embeds_neftune, embeds_neftune_2) assert len(trainer.model.get_input_embeddings()._forward_hooks) > 0 trainer.neftune_hook_handle.remove() trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") assert "adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2") assert "model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2") # Make sure forward pass works fine to check if embeddings forward is not broken. _ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device)) assert len(trainer.model.get_input_embeddings()._forward_hooks) == 0
trl/tests/test_sft_trainer.py/0
{ "file_path": "trl/tests/test_sft_trainer.py", "repo_id": "trl", "token_count": 17089 }
409
# flake8: noqa # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # There is a circular import in the PPOTrainer if we let isort sort these # isort: off from .utils import ( AdaptiveKLController, FixedKLController, ConstantLengthDataset, DataCollatorForCompletionOnlyLM, RunningMoments, disable_dropout_in_model, peft_module_casting_to_bf16, ) # isort: on from ..import_utils import is_diffusers_available from .base import BaseTrainer from .ddpo_config import DDPOConfig if is_diffusers_available(): from .ddpo_trainer import DDPOTrainer from .dpo_trainer import DPOTrainer from .iterative_sft_trainer import IterativeSFTTrainer from .model_config import ModelConfig from .ppo_config import PPOConfig from .ppo_trainer import PPOTrainer from .reward_config import RewardConfig from .reward_trainer import RewardTrainer, compute_accuracy from .sft_trainer import SFTTrainer
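Given the import-order constraint above, a short usage sketch: downstream code imports from this package surface (or the top-level `trl`) rather than from the defining submodules.

```python
# Importing via the package keeps the carefully ordered imports above intact.
from trl.trainer import DataCollatorForCompletionOnlyLM, SFTTrainer  # noqa: F401
```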
trl/trl/trainer/__init__.py/0
{ "file_path": "trl/trl/trainer/__init__.py", "repo_id": "trl", "token_count": 450 }
410
// File only needed for VSCode users to have proper Docker based interpreters { "name": "accelerate_dev_environment", "build": { // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment "dockerfile": "../docker/accelerate-cpu/Dockerfile" // "dockerfile": "../docker/accelerate-gpu/Dockerfile" }, "runArgs": [ // ACTION NEEDED: uncomment the next line if your local machine has GPUs available // "--gpus", "all", // Enable the docker container to access system resources "--ipc", "host" ], "remoteEnv": { "PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}" }, "customizations": { "vscode": { "extensions": [ // Ensure we have IntelliSense in VSCode when running inside container "ms-python.python" ] } }, "workspaceFolder": "/workspaces/accelerate", // Need git for VSCode to color code modifications. Only runs when building environment. "onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'" }
accelerate/.devcontainer/devcontainer.json/0
{ "file_path": "accelerate/.devcontainer/devcontainer.json", "repo_id": "accelerate", "token_count": 459 }
0
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time

import torch
import transformers
from measures_util import end_measure, log_measures, start_measure
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer

from accelerate.utils import compute_module_sizes

DEFAULT_MODELS = {
    "gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"},
    "gpt-neox": {"is_causal": True, "model": "EleutherAI/gpt-neox-20b"},
    "opt": {"is_causal": True, "model": "facebook/opt-30b"},
    "T0pp": {"is_causal": False, "model": "bigscience/T0pp", "model_revision": "sharded"},
}

PROMPTS = [
    "Hello, my name is",
    "Are unicorns real? Unicorns are",
    "For the first time in several years,",
    "My name is Julien and I am",
    "The goal of life is",
    "Whenever I'm sad, I like to",
]


def parse_args():
    parser = argparse.ArgumentParser(description="Run and time generations on a big model using Accelerate.")
    parser.add_argument("model_name", type=str, default=None, help="The name of the model to try.")
    parser.add_argument(
        "--tokenizer_name", type=str, default=None, help="The name of the tokenizer (if different from the model)."
    )
    parser.add_argument("--is_causal", type=bool, default=None, help="Whether or not the model is causal.")
    parser.add_argument(
        "--model_revision", type=str, default=None, help="The revision to use for the model checkpoint."
) parser.add_argument("--torch_dtype", type=str, default=None, help="The dtype for the model.") parser.add_argument("--disk_offload", action="store_true") args = parser.parse_args() # Sanitize args if args.model_name in DEFAULT_MODELS: defaults = DEFAULT_MODELS[args.model_name] args.model_name = defaults["model"] if args.tokenizer_name is None: args.tokenizer_name = defaults.get("tokenizer", args.model_name) if args.is_causal is None: args.is_causal = defaults["is_causal"] if args.model_revision is None: args.model_revision = defaults.get("model_revision", "main") if args.is_causal is None: raise ValueError("Could not infer the default for `--is_causal`, pass either True or False for it.") if args.tokenizer_name is None: args.tokenizer_name = args.model_name if args.model_revision is None: args.model_revision = "main" return args def main(): transformers.utils.logging.set_verbosity_error() args = parse_args() if args.torch_dtype is None: config = AutoConfig.from_pretrained(args.model_name) torch_dtype = getattr(config, "torch_dtype", torch.float32) else: torch_dtype = getattr(torch, args.torch_dtype) model_cls = AutoModelForCausalLM if args.is_causal else AutoModelForSeq2SeqLM kwargs = { "torch_dtype": torch_dtype, "revision": args.model_revision, } if args.disk_offload: kwargs["offload_folder"] = "tmp_offload" kwargs["offload_state_dict"] = True start_measures = start_measure() model = model_cls.from_pretrained(args.model_name, device_map="auto", **kwargs) end_measures = end_measure(start_measures) log_measures(end_measures, "Model loading") module_sizes = compute_module_sizes(model) device_size = {v: 0 for v in model.hf_device_map.values()} for module, device in model.hf_device_map.items(): device_size[device] += module_sizes[module] message = "\n".join([f"- {device}: {size // 2**20}MiB" for device, size in device_size.items()]) print(f"\nTheoretical use:\n{message}") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name) start_measures = start_measure() generation_times = [] gen_tokens = [] texts_outs = [] for prompt in PROMPTS: inputs = tokenizer(prompt, return_tensors="pt").to(0) tokens = inputs["input_ids"][0].tolist() before_generate = time.time() outputs = model.generate(inputs["input_ids"]) after_generate = time.time() outputs = outputs[0].tolist() num_gen_tokens = len(outputs) if outputs[: len(tokens)] != tokens else len(outputs) - len(tokens) generation_time = after_generate - before_generate text_out = tokenizer.decode(outputs, skip_special_tokens=True) texts_outs.append(text_out) generation_times.append(generation_time) gen_tokens.append(num_gen_tokens) print(f"Prompt: {prompt}\nGeneration {text_out}\nIn {generation_time:.2f}s for {num_gen_tokens} tokens\n") end_measures = end_measure(start_measures) log_measures(end_measures, "Model generation") generation_times_per_token = [gen / tok for gen, tok in zip(generation_times, gen_tokens)] avg_gen = sum(generation_times_per_token) / len(generation_times) print(f"Average time of generation per token: {avg_gen:.2f}s") print(f"First generation (avg time per token): {generation_times_per_token[0]:.2f}s") avg_gen = sum(generation_times_per_token[1:]) / (len(generation_times_per_token) - 1) print(f"Average time of generation per token (excluding the first): {avg_gen:.2f}s") if __name__ == "__main__": main()
accelerate/benchmarks/big_model_inference.py/0
{ "file_path": "accelerate/benchmarks/big_model_inference.py", "repo_id": "accelerate", "token_count": 2241 }
1
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# 🤗 Accelerate's internal mechanisms

Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`~Accelerator`] and performs any specific initialization
your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version from which
it inherits.)

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`],
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`]

While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created.
This is mostly because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created
and the library handles the sharding of your data between processes by changing that `batch_sampler` to yield every
other `num_processes` batches (if enabled).

The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
  randomization (like shuffling) is done the exact same way across processes.
- it puts the batches on the proper device before yielding them (unless you have opted out of `device_placement=True`).

The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that when
iterating through the `DataLoader`, the data all starts on process 0 and is *then* split and sent off to each process,
rather than this happening at the dataset level.

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <=1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid setting the same seed in
the main random number generator in all processes.
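For example, a minimal sketch of the recommended pattern: give the sampler its own local `torch.Generator` instead of seeding the global one, and let [`Accelerator.prepare`] keep it in sync:

```python
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(128))
generator = torch.Generator()  # local RNG that Accelerate can synchronize
sampler = RandomSampler(dataset, generator=generator)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=16)
# dataloader = accelerator.prepare(dataloader)  # shuffling now matches across processes
```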
<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get the
same random numbers from the torch random modules (so will apply the same random data augmentation if it's controlled
by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6); see the traditional `RandomSampler` as an example.

</Tip>

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
accelerate/docs/source/concept_guides/internal_mechanism.md/0
{ "file_path": "accelerate/docs/source/concept_guides/internal_mechanism.md", "repo_id": "accelerate", "token_count": 1096 }
2
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Stateful Classes

Below are variations of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all
instances share the same state, which is initialized on the first instantiation.

These classes are immutable and store information about certain configurations or states.

[[autodoc]] state.PartialState

[[autodoc]] state.AcceleratorState

[[autodoc]] state.GradientState
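A short sketch of what shared state means in practice: every instantiation is a view onto the same underlying state, so attributes always agree.

```python
from accelerate import PartialState

state_a = PartialState()
state_b = PartialState()

# Both instances read from the same shared state.
assert state_a.device == state_b.device
assert state_a.process_index == state_b.process_index
```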
accelerate/docs/source/package_reference/state.md/0
{ "file_path": "accelerate/docs/source/package_reference/state.md", "repo_id": "accelerate", "token_count": 291 }
3
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Understanding how big of a model can fit on your machine

One very difficult aspect when exploring potential models to use on your machine is knowing just how big of a model will *fit* into memory with your current graphics card (such as loading the model onto CUDA).

To help alleviate this, 🤗 Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will
help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the 🤗 Hub which
will even let you post those results directly on the model repo!

Currently we support searching for models that can be used in `timm` and `transformers`.

<Tip>

This API will load the model into memory on the `meta` device, so we are not actually downloading
and loading the full weights of the model into memory, nor do we need to. As a result it's
perfectly fine to measure 8 billion parameter models (or more), without having to worry about
if your CPU can handle it!

</Tip>

## Gradio Demos

Below are a few gradio demos related to what was described above. The first is the official Hugging Face memory estimation space, utilizing Accelerate directly:

<div class="block dark:hidden">
  <iframe
    src="https://hf-accelerate-model-memory-usage.hf.space?__theme=light"
    width="850"
    height="1600"
  ></iframe>
</div>
<div class="hidden dark:block">
  <iframe
    src="https://hf-accelerate-model-memory-usage.hf.space?__theme=dark"
    width="850"
    height="1600"
  ></iframe>
</div>

A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.

## The Command

When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework that model utilizes (if it can't be found automatically), and the data types you want the model to be loaded in with.

For example, here is how we can calculate the memory footprint for `bert-base-cased`:

```bash
accelerate estimate-memory bert-base-cased
```

This will download the `config.json` for `bert-base-cased`, load the model on the `meta` device, and report back how much space it will use:

Memory Usage for loading `bert-base-cased`:

| dtype   | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 84.95 MB      | 413.18 MB  | 1.61 GB             |
| float16 | 42.47 MB      | 206.59 MB  | 826.36 MB           |
| int8    | 21.24 MB      | 103.29 MB  | 413.18 MB           |
| int4    | 10.62 MB      | 51.65 MB   | 206.59 MB           |

By default it will return all the supported dtypes (`int4` through `float32`), but if you are interested in specific ones these can be filtered.
### Specific libraries If the source library cannot be determined automatically (like it could in the case of `bert-base-cased`), a library name can be passed in. ```bash accelerate estimate-memory HuggingFaceM4/idefics-80b-instruct --library_name transformers ``` Memory Usage for loading `HuggingFaceM4/idefics-80b-instruct`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 3.02 GB | 297.12 GB | 1.16 TB | | float16 | 1.51 GB | 148.56 GB | 594.24 GB | | int8 | 772.52 MB | 74.28 GB | 297.12 GB | | int4 | 386.26 MB | 37.14 GB | 148.56 GB | ```bash accelerate estimate-memory timm/resnet50.a1_in1k --library_name timm ``` Memory Usage for loading `timm/resnet50.a1_in1k`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 9.0 MB | 97.7 MB | 390.78 MB | | float16 | 4.5 MB | 48.85 MB | 195.39 MB | | int8 | 2.25 MB | 24.42 MB | 97.7 MB | | int4 | 1.12 MB | 12.21 MB | 48.85 MB | ### Specific dtypes As mentioned earlier, while we return `int4` through `float32` by default, any dtype can be used from `float32`, `float16`, `int8`, and `int4`. To do so, pass them in after specifying `--dtypes`: ```bash accelerate estimate-memory bert-base-cased --dtypes float32 float16 ``` Memory Usage for loading `bert-base-cased`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 84.95 MB | 413.18 MB | 1.61 GB | | float16 | 42.47 MB | 206.59 MB | 826.36 MB | ## Caveats with this calculator This calculator will tell you how much memory is needed to purely load the model in, *not* to perform inference. This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`. When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate to these values, and will update this calculator once done.
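The table values follow from bytes-per-parameter arithmetic, with the "Training using Adam" column budgeted at roughly 4x the load size (weights, gradients, and two optimizer states); a back-of-the-envelope sketch, where the parameter count is an approximation:

```python
def estimate_memory(num_params: int, bytes_per_param: int) -> tuple[float, float]:
    """Return (load size, Adam training size) in MiB."""
    total = num_params * bytes_per_param
    return total / 2**20, 4 * total / 2**20

# bert-base-cased has roughly 108M parameters
load_mb, train_mb = estimate_memory(108_310_000, 4)  # float32
print(f"load: {load_mb:.2f} MiB, training: {train_mb / 1024:.2f} GiB")  # ~413 MiB, ~1.61 GiB
```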
accelerate/docs/source/usage_guides/model_size_estimator.md/0
{ "file_path": "accelerate/docs/source/usage_guides/model_size_estimator.md", "repo_id": "accelerate", "token_count": 2029 }
4
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset without using HuggingFace Trainer. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import argparse import json import logging import math import os import random from itertools import chain from pathlib import Path import datasets import torch import transformers from datasets import load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import MegatronLMDummyScheduler, set_seed # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.23.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." 
) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--block_size", type=int, default=None, help=( "Optional input sequence length after tokenization. The training dataset will be truncated in block of" " this size for training. Default to the model max input length for single sentence inputs (take into" " account special tokens)." ), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["logging_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} dataset_args = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{args.validation_split_percentage}%]", **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{args.validation_split_percentage}%:]", **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if args.config_name: config = AutoConfig.from_pretrained(args.config_name) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) with accelerator.main_process_first(): tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." ) block_size = 1024 else: if args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map with accelerator.main_process_first(): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True # New Code # For Megatron-LM, we need to use `MegatronLMDummyScheduler` instead of regular schedulers if accelerator.distributed_type == DistributedType.MEGATRON_LM: lr_scheduler = MegatronLMDummyScheduler( optimizer=optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps, ) else: lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # On TPU, the tied weights in our model have been disconnected, so we need to restore the ties. if accelerator.distributed_type == DistributedType.TPU: model.tie_weights() # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how often we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("clm_no_trainer", experiment_config) # Train! # New Code # For Megatron-LM, we need to get `global_batch_size` from megatron_lm_plugin # as it handles the specifics related to data parallelism, tensor model parallelism and pipeline parallelism if accelerator.distributed_type == DistributedType.MEGATRON_LM: total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size else: total_batch_size = ( args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if we resumed from a checkpoint progress_bar.update(starting_epoch * num_update_steps_per_epoch) completed_steps = starting_epoch * num_update_steps_per_epoch for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) completed_steps += 1 continue with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss # New Code # For Megatron-LM, the losses are already averaged across the data parallel group if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses.append(loss) else: losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) try: if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses = torch.tensor(losses) else: losses = torch.cat(losses) eval_loss = torch.mean(losses) perplexity = math.exp(eval_loss) except OverflowError: perplexity = float("inf") logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}") if args.with_tracking: accelerator.log( { "perplexity": perplexity, "eval_loss": eval_loss, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # Calling `accelerator.end_training()` here causes an issue with Megatron-LM when using `wandb` at the end of the main function. # Everything works fine in spite of commenting this out (wandb finishes/closes the run without error). # if args.with_tracking: # accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() # New Code # For Megatron-LM, we need to save the model using `accelerator.save_state` if accelerator.distributed_type == DistributedType.MEGATRON_LM: accelerator.save_state(args.output_dir) else: unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"perplexity": perplexity}, f) if __name__ == "__main__": main()
accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py/0
{ "file_path": "accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py", "repo_id": "accelerate", "token_count": 12439 }
5
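A note on the evaluation logic in the script above: with Megatron-LM the per-batch losses are already averaged across the data-parallel group, which is why the script appends them directly instead of calling `gather_for_metrics`. Both branches then feed the same perplexity computation; a minimal sketch of that epilogue (the helper name `perplexity_from_losses` is hypothetical):

```python
import math

import torch


def perplexity_from_losses(losses):
    # `losses` is a list of scalar loss tensors collected during evaluation;
    # stacking them and taking the mean mirrors the script's eval epilogue.
    eval_loss = torch.mean(torch.stack([loss.detach().float() for loss in losses]))
    try:
        # math.exp raises OverflowError for very large losses, which the
        # script maps to an infinite perplexity.
        return math.exp(eval_loss), eval_loss
    except OverflowError:
        return float("inf"), eval_loss
```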
#!/bin/bash #SBATCH --job-name=multinode #SBATCH -D . #SBATCH --output=O-%x.%j #SBATCH --error=E-%x.%j #SBATCH --nodes=4 # number of nodes #SBATCH --ntasks-per-node=1 # number of MP tasks #SBATCH --gres=gpu:4 # number of GPUs per node #SBATCH --cpus-per-task=160 # number of cores per task #SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS) ###################### ### Set environment ## ###################### source activateEnviroment.sh export GPUS_PER_NODE=4 ###################### ###################### #### Set network ##### ###################### head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) ###################### export LAUNCHER="accelerate launch \ --num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \ --num_machines $SLURM_NNODES \ --rdzv_backend c10d \ --main_process_ip $head_node_ip \ --main_process_port 29500 \ " export SCRIPT="/accelerate/examples/complete_nlp_example.py" export SCRIPT_ARGS=" \ --mixed_precision fp16 \ --output_dir /accelerate/examples/output \ " # This step is necessary because accelerate launch does not handle multiline arguments properly export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS" srun $CMD
accelerate/examples/slurm/submit_multinode.sh/0
{ "file_path": "accelerate/examples/slurm/submit_multinode.sh", "repo_id": "accelerate", "token_count": 519 }
6
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from ...utils import ( ComputeEnvironment, DistributedType, is_deepspeed_available, is_mps_available, is_npu_available, is_transformers_available, is_xpu_available, ) from ...utils.constants import ( DEEPSPEED_MULTINODE_LAUNCHERS, FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, TORCH_DYNAMO_MODES, ) from .config_args import ClusterConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_distributed_mode, _convert_dynamo_backend, _convert_mixed_precision, _convert_yes_no_to_bool, ) def get_cluster_input(): distributed_type = _ask_options( "Which type of machine are you using?", ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"], _convert_distributed_mode, ) machine_rank = 0 num_machines = 1 num_processes = 1 gpu_ids = None main_process_ip = None main_process_port = None rdzv_backend = "static" same_network = True debug = False if distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, ]: num_machines = _ask_field( "How many different machines will you use (use more than 1 for multi-node training)? [1]: ", int, default=1, ) if num_machines > 1: machine_rank = _ask_options( "What is the rank of this machine?", list(range(num_machines)), int, ) main_process_ip = _ask_field( "What is the IP address of the machine that will host the main process? ", ) main_process_port = _ask_field( "What is the port you will use to communicate with the main process? ", int, ) same_network = _ask_field( "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) if not same_network: rdzv_backend = _ask_field( "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static" ) debug = _ask_field( "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if distributed_type == DistributedType.NO: use_cpu = _ask_field( "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) elif distributed_type == DistributedType.MULTI_CPU: use_cpu = True else: use_cpu = False ipex_config = {} if use_cpu: ipex_config["ipex"] = _ask_field( "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? 
[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if ( not use_cpu and is_xpu_available() and distributed_type not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.TPU] ): ipex_config["use_xpu"] = _ask_field( "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config = {} use_dynamo = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_dynamo: prefix = "dynamo_" dynamo_config[prefix + "backend"] = _ask_options( "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) use_custom_options = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_custom_options: dynamo_config[prefix + "mode"] = _ask_options( "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default=0, ) dynamo_config[prefix + "use_fullgraph"] = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config[prefix + "use_dynamic"] = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) use_mps = not use_cpu and is_mps_available() deepspeed_config = {} if ( distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_NPU, DistributedType.NO] and not use_mps ): use_deepspeed = _ask_field( "Do you want to use DeepSpeed? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_deepspeed: distributed_type = DistributedType.DEEPSPEED assert ( is_deepspeed_available() ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source" if distributed_type == DistributedType.DEEPSPEED: use_deepspeed_config = _ask_field( "Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_deepspeed_config: deepspeed_config["deepspeed_config_file"] = _ask_field( "Please enter the path to the json DeepSpeed config file: ", str, default="none", ) else: deepspeed_config["zero_stage"] = _ask_options( "What should be your DeepSpeed's ZeRO optimization stage?", [0, 1, 2, 3], int, default=2, ) deepspeed_devices = ["none", "cpu", "nvme"] if deepspeed_config["zero_stage"] >= 2: deepspeed_config["offload_optimizer_device"] = _ask_options( "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] ) deepspeed_config["offload_param_device"] = _ask_options( "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] ) if deepspeed_config["offload_param_device"] == "nvme": deepspeed_config["offload_param_nvme_path"] = _ask_field( "Nvme Path to offload parameters?", str, default="/nvme", ) if deepspeed_config["offload_optimizer_device"] == "nvme": deepspeed_config["offload_optimizer_nvme_path"] = _ask_field( "Nvme Path to offload optimizer states?", str, default="/nvme", ) deepspeed_config["gradient_accumulation_steps"] = _ask_field( "How many gradient accumulation steps you're passing in your script? [1]: ", int, default=1, ) use_gradient_clipping = _ask_field( "Do you want to use gradient clipping? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_gradient_clipping: deepspeed_config["gradient_clipping"] = _ask_field( "What is the gradient clipping value? [1.0]: ", float, default=1.0, ) if deepspeed_config["zero_stage"] == 3: deepspeed_config["zero3_save_16bit_model"] = _ask_field( "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) deepspeed_config["zero3_init_flag"] = _ask_field( "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if deepspeed_config["zero3_init_flag"]: if not is_transformers_available(): raise Exception( "When `zero3_init_flag` is set, it requires Transformers to be installed. " "Please run `pip3 install transformers`." ) if num_machines > 1: launcher_query = "Which Type of launcher do you want to use?" deepspeed_config["deepspeed_multinode_launcher"] = _ask_options( launcher_query, DEEPSPEED_MULTINODE_LAUNCHERS, lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)], ) if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]: deepspeed_config["deepspeed_hostfile"] = _ask_field( "DeepSpeed configures multi-node compute resources with hostfile. " "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; " "for more information please refer official [documentation]" "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). " "Please specify the location of hostfile: ", str, ) is_exclusion_filter = _ask_field( "Do you want to specify exclusion filter string? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if is_exclusion_filter: deepspeed_config["deepspeed_exclusion_filter"] = _ask_field( "DeepSpeed exclusion filter string: ", str, ) is_inclusion_filter = _ask_field( "Do you want to specify inclusion filter string? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if is_inclusion_filter: deepspeed_config["deepspeed_inclusion_filter"] = _ask_field( "DeepSpeed inclusion filter string: ", str, ) fsdp_config = {} if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]: use_fsdp = _ask_field( "Do you want to use FullyShardedDataParallel? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_fsdp: distributed_type = DistributedType.FSDP if distributed_type == DistributedType.FSDP: sharding_strategy_query = "What should be your sharding strategy?" fsdp_config["fsdp_sharding_strategy"] = _ask_options( sharding_strategy_query, FSDP_SHARDING_STRATEGY, lambda x: FSDP_SHARDING_STRATEGY[int(x)], ) fsdp_config["fsdp_offload_params"] = _ask_field( "Do you want to offload parameters and gradients to CPU? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fsdp_wrap_query = "What should be your auto wrap policy?" fsdp_config["fsdp_auto_wrap_policy"] = _ask_options( fsdp_wrap_query, FSDP_AUTO_WRAP_POLICY, lambda x: FSDP_AUTO_WRAP_POLICY[int(x)], ) if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]: use_no_split_modules = _ask_field( "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if not use_no_split_modules: fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field( "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :" "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ", str, ) elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]: fsdp_config["fsdp_min_num_params"] = _ask_field( "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ", int, default=100000000, ) fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?" fsdp_config["fsdp_backward_prefetch"] = _ask_options( fsdp_backward_prefetch_query, FSDP_BACKWARD_PREFETCH, lambda x: FSDP_BACKWARD_PREFETCH[int(x)], ) fsdp_state_dict_type_query = "What should be your FSDP's state dict type?" fsdp_config["fsdp_state_dict_type"] = _ask_options( fsdp_state_dict_type_query, FSDP_STATE_DICT_TYPE, lambda x: FSDP_STATE_DICT_TYPE[int(x)], default=2, ) fsdp_config["fsdp_forward_prefetch"] = _ask_field( "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fsdp_config["fsdp_use_orig_params"] = _ask_field( "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field( "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) if fsdp_config["fsdp_cpu_ram_efficient_loading"]: fsdp_config["fsdp_sync_module_states"] = True else: fsdp_config["fsdp_sync_module_states"] = _ask_field( "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? 
[YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config = {} if distributed_type in [DistributedType.MULTI_GPU]: use_megatron_lm = _ask_field( "Do you want to use Megatron-LM ? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_megatron_lm: distributed_type = DistributedType.MEGATRON_LM if distributed_type == DistributedType.MEGATRON_LM: prefix = "megatron_lm_" megatron_lm_config[prefix + "tp_degree"] = _ask_field( "What is the Tensor Parallelism degree/size? [1]:", int, default=1, error_message="Please enter an integer.", ) if megatron_lm_config[prefix + "tp_degree"] > 1: megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field( "Do you want to enable Sequence Parallelism? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "pp_degree"] = _ask_field( "What is the Pipeline Parallelism degree/size? [1]:", int, default=1, error_message="Please enter an integer.", ) if megatron_lm_config[prefix + "pp_degree"] > 1: megatron_lm_config[prefix + "num_micro_batches"] = _ask_field( "What is the number of micro-batches? [1]:", int, default=1, error_message="Please enter an integer.", ) megatron_lm_config[prefix + "recompute_activations"] = _ask_field( "Do you want to enable selective activation recomputation? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field( "Do you want to use distributed optimizer " "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "gradient_clipping"] = _ask_field( "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ", float, default=1.0, ) # TPU specific defaults tpu_commands = None tpu_command_file = None tpu_downcast_bf16 = "no" tpu_env = [] tpu_name = None tpu_vm = None tpu_zone = None tpu_use_sudo = False tpu_use_cluster = False if distributed_type in [ DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.TPU, ]: machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "") if machine_type == "TPU": machine_type += " cores" else: machine_type += "(s)" num_processes = _ask_field( f"How many {machine_type} should be used for distributed training? [1]:", int, default=1, error_message="Please enter an integer.", ) elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: num_processes = _ask_field( "How many GPU(s) should be used for distributed training? [1]:", int, default=1, error_message="Please enter an integer.", ) else: num_processes = 1 if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1): raise ValueError( f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using." 
) if ( distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.NO, ] and not use_cpu and not use_mps ): if is_npu_available(): machine_type = "NPU(s)" else: machine_type = "GPU(s)" gpu_ids = _ask_field( f"What {machine_type} (by id) should be used for training on this machine as a comma-separated list? [all]:", default="all", ) if distributed_type == DistributedType.TPU: mixed_precision = "no" main_training_function = _ask_field( "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ", default="main", ) tpu_use_cluster = _ask_field( "Are you using a TPU cluster? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if tpu_use_cluster: tpu_name = _ask_field( "What is the name of your TPU cluster? ", default=None, error_message="Please enter the name of your TPU cluster.", ) tpu_zone = _ask_field( "What is the zone of your TPU cluster? ", default=None, error_message="Please enter the zone of your TPU cluster.", ) tpu_use_sudo = _ask_field( "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) run_commands = _ask_field( "Do you have code you wish to run on startup in each pod? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if run_commands: use_command_file = _ask_field( "Is this code located in a bash script? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_command_file: tpu_command_file = _ask_field( "What is the path to your bash script? ", default=None, error_message="Please enter the path to your bash script.", ) tpu_command_file = os.path.abspath(tpu_command_file) else: print("Please enter each command you wish to run on startup in each pod separately.") tpu_commands = [] another_command = True while another_command: tpu_commands.append( _ask_field( "Please enter a single command to be run ", default=None, error_message="Please enter the commands you wish to run on startup in each pod as a single string.", ) ) another_command = _ask_field( "Do you wish to add another command? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) tpu_vm = _ask_field( "If not using an instance group, what are the names of the Compute VM instances to be used, separated by a comma: ", default="", ).split(",") tpu_env = _ask_field( "What environment variables do you wish to set in each pod, separated by a comma: ", default="", ).split(",") else: main_training_function = "main" if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config: mixed_precision = None else: mixed_precision = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no" and not use_cpu: print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
) if distributed_type == DistributedType.TPU and mixed_precision == "bf16": tpu_downcast_bf16 = _ask_field( "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no" ) return ClusterConfig( compute_environment=ComputeEnvironment.LOCAL_MACHINE, distributed_type=distributed_type, num_processes=num_processes, gpu_ids=gpu_ids, mixed_precision=mixed_precision, downcast_bf16=tpu_downcast_bf16, machine_rank=machine_rank, num_machines=num_machines, main_process_ip=main_process_ip, main_process_port=main_process_port, main_training_function=main_training_function, deepspeed_config=deepspeed_config, fsdp_config=fsdp_config, megatron_lm_config=megatron_lm_config, ipex_config=ipex_config, use_cpu=use_cpu, rdzv_backend=rdzv_backend, same_network=same_network, commands=tpu_commands, command_file=tpu_command_file, tpu_env=tpu_env, tpu_name=tpu_name, tpu_vm=tpu_vm, tpu_zone=tpu_zone, tpu_use_sudo=tpu_use_sudo, tpu_use_cluster=tpu_use_cluster, dynamo_config=dynamo_config, debug=debug, )
accelerate/src/accelerate/commands/config/cluster.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/cluster.py", "repo_id": "accelerate", "token_count": 14546 }
7
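All of the prompts in the questionnaire above funnel through the `_ask_field`/`_ask_options` helpers imported from `config_utils`. A rough standalone re-implementation of the `_ask_field` pattern, to show the convert/default/error loop the flow relies on (this is an approximation, not the actual helper):

```python
def ask_field(prompt, convert=str, default=None, error_message=None):
    # Keep asking until the answer converts cleanly; an empty answer
    # falls back to the default when one is provided.
    while True:
        raw = input(prompt)
        if not raw and default is not None:
            return default
        try:
            return convert(raw)
        except ValueError:
            if error_message is not None:
                print(error_message)
```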
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def test_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("test") else: parser = argparse.ArgumentParser("Accelerate test command") parser.add_argument( "--config_file", default=None, help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ), ) if subparsers is not None: parser.set_defaults(func=test_command) return parser def test_command(args): script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"]) if args.config_file is None: test_args = script_name else: test_args = f"--config_file={args.config_file} {script_name}" cmd = ["accelerate-launch"] + test_args.split() result = execute_subprocess_async(cmd, env=os.environ.copy()) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!") def main(): parser = test_command_parser() args = parser.parse_args() test_command(args) if __name__ == "__main__": main()
accelerate/src/accelerate/commands/test.py/0
{ "file_path": "accelerate/src/accelerate/commands/test.py", "repo_id": "accelerate", "token_count": 773 }
8
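For reference, the parser above can be driven programmatically as well as through the `accelerate test` CLI; a small sketch (the config path is a placeholder):

```python
from accelerate.commands.test import test_command, test_command_parser

# Equivalent to running `accelerate test --config_file <path>` from a shell.
parser = test_command_parser()
args = parser.parse_args(["--config_file", "/path/to/default_config.yaml"])
test_command(args)
```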
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader, IterableDataset from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.data_loader import DataLoaderDispatcher from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device from accelerate.utils import set_seed os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true" class ListHandler(logging.Handler): def __init__(self, *args, **kwargs): super(ListHandler, self).__init__(*args, **kwargs) self.logs = [] def emit(self, record): self.logs.append(record) def get_basic_setup(accelerator, num_samples=82, batch_size=16): "Returns everything needed to perform basic training" set_seed(42) model = RegressionModel() ddp_model = deepcopy(model) dset = RegressionDataset(length=num_samples) dataloader = DataLoader(dset, batch_size=batch_size) model.to(accelerator.device) ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) return model, ddp_model, dataloader def get_dataloader(accelerator: Accelerator, use_longest=False): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased") dataset = load_dataset("glue", "mrpc", split="validation") def tokenize_function(examples): outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs with accelerator.main_process_first(): tokenized_datasets = dataset.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): if use_longest: return tokenizer.pad(examples, padding="longest", return_tensors="pt") return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16) def get_mrpc_setup(dispatch_batches, split_batches): accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches) dataloader = get_dataloader(accelerator, not dispatch_batches) model = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased", return_dict=True ) ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader) return { "ddp": [ddp_model, ddp_dataloader, torch_device], "no": [model, dataloader, accelerator.device], }, accelerator def generate_predictions(model, dataloader, accelerator): logits_and_targets = [] for batch in dataloader: input, target = batch.values() with torch.no_grad(): logit = model(input) logit, target = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) logits, targs = [], [] for logit, targ in logits_and_targets: logits.append(logit) 
targs.append(targ) logits, targs = torch.cat(logits), torch.cat(targs) return logits, targs def test_torch_metrics( accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16 ): model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size) logits, targs = generate_predictions(ddp_model, dataloader, accelerator) assert ( len(logits) == num_samples ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}" def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False): metric = evaluate.load("glue", "mrpc") setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches) # First do baseline model, dataloader, device = setup["no"] model.to(device) model.eval() for batch in dataloader: batch.to(device) with torch.inference_mode(): outputs = model(**batch) preds = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=preds, references=batch["labels"]) baseline = metric.compute() # Then do distributed model, dataloader, device = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): outputs = model(**batch) preds = outputs.logits.argmax(dim=-1) references = batch["labels"] preds, references = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=preds, references=references) distributed = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key], distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset(): class DummyIterableDataset(IterableDataset): def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __iter__(self): for element in self.data: yield element iterable_dataset = DummyIterableDataset([n for n in range(30)]) dataloader = DataLoader(iterable_dataset, batch_size=4) accelerator = Accelerator() prepared_dataloader = accelerator.prepare(dataloader) if accelerator.is_main_process: logger = logging.root.manager.loggerDict["accelerate.accelerator"] list_handler = ListHandler() logger.addHandler(list_handler) batches_for_metrics = [] for batch in prepared_dataloader: batches_for_metrics.append(accelerator.gather_for_metrics(batch)) assert torch.cat(batches_for_metrics).size(0) == 30 if accelerator.is_main_process: assert len(list_handler.logs) == 0 logger.removeHandler(list_handler) def test_gather_for_metrics_with_iterable_dataset(): class DummyIterableDataset(IterableDataset): def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __iter__(self): for element in self.data: yield element iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30))) dataloader = DataLoader(iterable_dataset, batch_size=4) accelerator = Accelerator() prepared_dataloader = accelerator.prepare(dataloader) assert isinstance(prepared_dataloader, DataLoaderDispatcher) if accelerator.is_main_process: logger = logging.root.manager.loggerDict["accelerate.accelerator"] list_handler = ListHandler() logger.addHandler(list_handler) batches_for_metrics = [] for batch in prepared_dataloader: batches_for_metrics.append(accelerator.gather_for_metrics(batch)) assert torch.cat(batches_for_metrics).size(0) == 30 if accelerator.is_main_process: assert len(list_handler.logs) == 0 logger.removeHandler(list_handler) def test_gather_for_metrics_drop_last(): accelerator = Accelerator() per_device_batch_size = 5 num_items 
= (10 * accelerator.num_processes) + 1 dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True) dataloader = accelerator.prepare(dataloader) iterator = iter(dataloader) next(iterator) # Skip the first batch, e.g. tensor([0, 1, 2, 3, 4]) on process 0 batch = next(iterator) gathered_items = accelerator.gather_for_metrics(batch) # Should return a full set of complete batches from each GPU num_expected_items = per_device_batch_size * accelerator.num_processes assert gathered_items.size(0) == ( num_expected_items ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}" def main(): accelerator = Accelerator(split_batches=False, dispatch_batches=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be run on the GPU or TPU if accelerator.device.type != "cpu": if accelerator.is_local_main_process: print("**Testing gather_for_metrics**") for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`") test_mrpc(dispatch_batches, split_batches) accelerator.state._reset_state() print("test_gather_for_metrics_with_iterable_dataset") test_gather_for_metrics_with_iterable_dataset() print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset") test_gather_for_metrics_with_non_tensor_objects_iterable_dataset() if accelerator.is_local_main_process: print("**Test torch metrics**") for split_batches in [True, False]: for dispatch_batches in [True, False]: accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99") test_torch_metrics(accelerator, 99) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**") accelerator = Accelerator() test_torch_metrics(accelerator, 512) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test that `drop_last` is taken into account**") test_gather_for_metrics_drop_last() accelerator.state._reset_state() def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py", "repo_id": "accelerate", "token_count": 4313 }
9
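The tests above hinge on one behaviour of `Accelerator.gather_for_metrics`: unlike plain `gather`, it drops the duplicate samples that data-parallel sharding pads onto the final, uneven batch. A condensed sketch of that property (run it under `accelerate launch` with more than one process to see the effect):

```python
from accelerate import Accelerator
from torch.utils.data import DataLoader

accelerator = Accelerator()
# 10 samples never divide evenly across, e.g., 4 total batch slots per step.
dataloader = accelerator.prepare(DataLoader(range(10), batch_size=4))

gathered = []
for batch in dataloader:
    gathered.append(accelerator.gather_for_metrics(batch))

# Padding duplicates from the last batch are removed before metrics.
assert sum(t.numel() for t in gathered) == 10
```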
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class HfDeepSpeedConfig: """ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. A `weakref` of this object is stored in the module's globals to be able to access the config from areas where things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore it's important that this object remains alive while the program is still running. [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic the DeepSpeed configuration is not modified in any way. Args: config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. """ def __init__(self, config_file_or_dict): if isinstance(config_file_or_dict, dict): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden config = deepcopy(config_file_or_dict) elif os.path.exists(config_file_or_dict): with io.open(config_file_or_dict, "r", encoding="utf-8") as f: config = json.load(f) else: try: config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") config = json.loads(config_decoded) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" ) self.config = config self.set_stage_and_offload() def set_stage_and_offload(self): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. 
self._stage = self.get_value("zero_optimization.stage", -1) # offload self._offload = False if self.is_zero2() or self.is_zero3(): offload_devices_valid = set(["cpu", "nvme"]) offload_devices = set( [ self.get_value("zero_optimization.offload_optimizer.device"), self.get_value("zero_optimization.offload_param.device"), ] ) if len(offload_devices & offload_devices_valid) > 0: self._offload = True def find_config_node(self, ds_key_long): config = self.config # find the config node of interest if it exists nodes = ds_key_long.split(".") ds_key = nodes.pop() for node in nodes: config = config.get(node) if config is None: return None, ds_key return config, ds_key def get_value(self, ds_key_long, default=None): """ Returns the set value or `default` if no value is set """ config, ds_key = self.find_config_node(ds_key_long) if config is None: return default return config.get(ds_key, default) def del_config_sub_tree(self, ds_key_long, must_exist=False): """ Deletes a sub-section of the config file if it's found. Unless `must_exist` is `True` the section doesn't have to exist. """ config = self.config # find the config node of interest if it exists nodes = ds_key_long.split(".") for node in nodes: parent_config = config config = config.get(node) if config is None: if must_exist: raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") else: return # if found remove it if parent_config is not None: parent_config.pop(node) def is_true(self, ds_key_long): """ Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `True` (and it's not set to `False` or isn't set). """ value = self.get_value(ds_key_long) return False if value is None else bool(value) def is_false(self, ds_key_long): """ Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True` or isn't set). """ value = self.get_value(ds_key_long) return False if value is None else not bool(value) def is_zero2(self): return self._stage == 2 def is_zero3(self): return self._stage == 3 def is_offload(self): return self._offload class DeepSpeedEngineWrapper: """ Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow the conventional training loop. Args: engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap """ def __init__(self, engine): self.engine = engine def backward(self, loss, **kwargs): # runs backpropagation and handles mixed precision self.engine.backward(loss, **kwargs) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): """ Internal wrapper around a deepspeed optimizer. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap.
""" def __init__(self, optimizer): super().__init__(optimizer, device_placement=False, scaler=None) self.__has_overflow__ = hasattr(self.optimizer, "overflow") def zero_grad(self, set_to_none=None): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def step(self): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def step_was_skipped(self): """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" if self.__has_overflow__: return self.optimizer.overflow return False class DeepSpeedSchedulerWrapper(AcceleratedScheduler): """ Internal wrapper around a deepspeed scheduler. Args: scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler to wrap. optimizers (one or a list of `torch.optim.Optimizer`): """ def __init__(self, scheduler, optimizers): super().__init__(scheduler, optimizers) def step(self): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class DummyOptim: """ Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training loop when optimizer config is specified in the deepspeed config file. Args: lr (float): Learning rate. params (iterable): iterable of parameters to optimize or dicts defining parameter groups weight_decay (float): Weight decay. **kwargs: Other arguments. """ def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): self.params = params self.lr = lr self.weight_decay = weight_decay self.kwargs = kwargs class DummyScheduler: """ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training loop when scheduler config is specified in the deepspeed config file. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. total_num_steps (int, *optional*): Total number of steps. warmup_num_steps (int, *optional*): Number of steps for warmup. lr_scheduler_callable (callable, *optional*): A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`. **kwargs: Other arguments. """ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs): self.optimizer = optimizer self.total_num_steps = total_num_steps self.warmup_num_steps = warmup_num_steps self.lr_scheduler_callable = lr_scheduler_callable self.kwargs = kwargs
accelerate/src/accelerate/utils/deepspeed.py/0
{ "file_path": "accelerate/src/accelerate/utils/deepspeed.py", "repo_id": "accelerate", "token_count": 4000 }
10
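A quick illustration of the dotted-path queries `HfDeepSpeedConfig` supports, using a toy config dict (the values are chosen only for the example):

```python
from accelerate.utils.deepspeed import HfDeepSpeedConfig

ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
    }
}
hf_ds = HfDeepSpeedConfig(ds_config)

assert hf_ds.is_zero3()
assert hf_ds.is_offload()  # offload_param.device == "cpu" triggers this
assert hf_ds.get_value("zero_optimization.stage") == 3
# Missing paths fall back to the default (None here).
assert hf_ds.get_value("zero_optimization.offload_optimizer.device") is None
```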
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC torch_version = parse(importlib.metadata.version("torch")) def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): """ Compares a library version to some requirement using a given operation. Args: library_or_version (`str` or `packaging.version.Version`): A library name or a version to check. operation (`str`): A string representation of an operator, such as `">"` or `"<="`. requirement_version (`str`): The version to compare the library version against """ if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") operation = STR_OPERATION_TO_FUNC[operation] if isinstance(library_or_version, str): library_or_version = parse(importlib.metadata.version(library_or_version)) return operation(library_or_version, parse(requirement_version)) def is_torch_version(operation: str, version: str): """ Compares the current PyTorch version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A string version of PyTorch """ return compare_versions(torch_version, operation, version)
accelerate/src/accelerate/utils/versions.py/0
{ "file_path": "accelerate/src/accelerate/utils/versions.py", "repo_id": "accelerate", "token_count": 701 }
11
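Usage of the two helpers above is straightforward; the `numpy` comparison below assumes numpy happens to be installed:

```python
from accelerate.utils.versions import compare_versions, is_torch_version

if is_torch_version(">=", "2.0.0"):
    print("Running on PyTorch 2.x or newer")

# compare_versions also accepts a library name, resolved via importlib.metadata.
if compare_versions("numpy", ">=", "1.20.0"):
    print("numpy is recent enough")
```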
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import unittest import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( device_count, execute_subprocess_async, require_cpu, require_multi_device, require_non_cpu, test_sync, ) from accelerate.utils import patch_environment class SyncScheduler(unittest.TestCase): def setUp(self): mod_file = inspect.getfile(accelerate.test_utils) self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_sync.py"]) @require_cpu def test_gradient_sync_cpu_noop(self): debug_launcher(test_sync.main, num_processes=1) @require_cpu def test_gradient_sync_cpu_multi(self): debug_launcher(test_sync.main) @require_non_cpu def test_gradient_sync_gpu(self): test_sync.main() @require_multi_device def test_gradient_sync_gpu_multi(self): print(f"Found {device_count} devices.") cmd = ["torchrun", f"--nproc_per_node={device_count}", self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd, env=os.environ.copy())
accelerate/tests/test_grad_sync.py/0
{ "file_path": "accelerate/tests/test_grad_sync.py", "repo_id": "accelerate", "token_count": 632 }
12
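`debug_launcher`, which the CPU tests above rely on, spawns a function across lightweight CPU processes without needing `torchrun`. A minimal sketch (the body of `train_fn` is illustrative only):

```python
from accelerate import Accelerator, debug_launcher


def train_fn():
    accelerator = Accelerator(cpu=True)
    # Each spawned process reports its rank.
    print(f"process {accelerator.process_index} / {accelerator.num_processes}")


# Mirrors the test's `debug_launcher(test_sync.main, num_processes=1)` call.
debug_launcher(train_fn, num_processes=2)
```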
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class MultiTPUTester(unittest.TestCase): def setUp(self): mod_file = inspect.getfile(accelerate.test_utils) self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1]) @require_tpu def test_tpu(self): distributed_args = f""" {self.test_dir}/xla_spawn.py --num_cores 8 {self.test_file_path} """.split() cmd = [sys.executable] + distributed_args execute_subprocess_async(cmd, env=os.environ.copy())
accelerate/tests/test_tpu.py/0
{ "file_path": "accelerate/tests/test_tpu.py", "repo_id": "accelerate", "token_count": 504 }
13
# Model arguments model_name_or_path: mistralai/Mistral-7B-v0.1 model_revision: main torch_dtype: bfloat16 use_flash_attention_2: true # Data training arguments chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" dataset_mixer: HuggingFaceH4/grok-conversation-harmless: 0.15 HuggingFaceH4/ultrachat_200k: 1.0 dataset_splits: - train_sft - test_sft preprocessing_num_workers: 12 # SFT trainer config bf16: true do_eval: true do_train: true evaluation_strategy: epoch # One of ["no", "steps", "epoch"] gradient_accumulation_steps: 4 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: False hub_model_id: mistral-7b-sft-constitutional-ai hub_strategy: every_save learning_rate: 2.0e-05 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 2048 max_steps: -1 num_train_epochs: 1 output_dir: data/mistral-7b-sft-constitutional-ai overwrite_output_dir: true per_device_eval_batch_size: 8 per_device_train_batch_size: 8 push_to_hub: true remove_unused_columns: true report_to: - tensorboard save_strategy: "steps" save_steps: 100 save_total_limit: 1 seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/constitutional-ai/sft/config_grok.yaml/0
{ "file_path": "alignment-handbook/recipes/constitutional-ai/sft/config_grok.yaml", "repo_id": "alignment-handbook", "token_count": 610 }
14
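The `chat_template` Jinja string in this recipe is what `tokenizer.apply_chat_template` consumes at preprocessing time. A hedged sketch of applying it (the template literal is elided here; assume it is the string from the YAML above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer.chat_template = "..."  # the Jinja template string from the recipe

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# With the real template set, this renders the <|system|>/<|user|>/<|assistant|>
# turns and appends the generation prompt.
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```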
__version__ = "0.3.0.dev0" from .configs import DataArguments, DPOConfig, H4ArgumentParser, ModelArguments, SFTConfig from .data import apply_chat_template, get_datasets from .model_utils import ( get_checkpoint, get_kbit_device_map, get_peft_config, get_quantization_config, get_tokenizer, is_adapter_model, )
alignment-handbook/src/alignment/__init__.py/0
{ "file_path": "alignment-handbook/src/alignment/__init__.py", "repo_id": "alignment-handbook", "token_count": 133 }
15
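These exports are typically combined in the handbook's training scripts roughly as follows (a sketch; `parse()` reads a YAML recipe path, such as the Constitutional-AI config above, from the command line):

```python
from alignment import DataArguments, H4ArgumentParser, ModelArguments, SFTConfig

# e.g. `python run_sft.py recipes/constitutional-ai/sft/config_grok.yaml`
parser = H4ArgumentParser((ModelArguments, DataArguments, SFTConfig))
model_args, data_args, training_args = parser.parse()
print(training_args.output_dir)
```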